1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2c0e09200SDave Airlie */ 3c0e09200SDave Airlie /* 4c0e09200SDave Airlie * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5c0e09200SDave Airlie * All Rights Reserved. 6c0e09200SDave Airlie * 7c0e09200SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining a 8c0e09200SDave Airlie * copy of this software and associated documentation files (the 9c0e09200SDave Airlie * "Software"), to deal in the Software without restriction, including 10c0e09200SDave Airlie * without limitation the rights to use, copy, modify, merge, publish, 11c0e09200SDave Airlie * distribute, sub license, and/or sell copies of the Software, and to 12c0e09200SDave Airlie * permit persons to whom the Software is furnished to do so, subject to 13c0e09200SDave Airlie * the following conditions: 14c0e09200SDave Airlie * 15c0e09200SDave Airlie * The above copyright notice and this permission notice (including the 16c0e09200SDave Airlie * next paragraph) shall be included in all copies or substantial portions 17c0e09200SDave Airlie * of the Software. 18c0e09200SDave Airlie * 19c0e09200SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20c0e09200SDave Airlie * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21c0e09200SDave Airlie * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22c0e09200SDave Airlie * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23c0e09200SDave Airlie * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24c0e09200SDave Airlie * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25c0e09200SDave Airlie * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
26c0e09200SDave Airlie * 27c0e09200SDave Airlie */ 28c0e09200SDave Airlie 29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30a70491ccSJoe Perches 3163eeaf38SJesse Barnes #include <linux/sysrq.h> 325a0e3ad6STejun Heo #include <linux/slab.h> 33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h> 34760285e7SDavid Howells #include <drm/drmP.h> 35760285e7SDavid Howells #include <drm/i915_drm.h> 36c0e09200SDave Airlie #include "i915_drv.h" 371c5d22f7SChris Wilson #include "i915_trace.h" 3879e53945SJesse Barnes #include "intel_drv.h" 39c0e09200SDave Airlie 40e5868a31SEgbert Eich static const u32 hpd_ibx[] = { 41e5868a31SEgbert Eich [HPD_CRT] = SDE_CRT_HOTPLUG, 42e5868a31SEgbert Eich [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 43e5868a31SEgbert Eich [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 44e5868a31SEgbert Eich [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 45e5868a31SEgbert Eich [HPD_PORT_D] = SDE_PORTD_HOTPLUG 46e5868a31SEgbert Eich }; 47e5868a31SEgbert Eich 48e5868a31SEgbert Eich static const u32 hpd_cpt[] = { 49e5868a31SEgbert Eich [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 5073c352a2SDaniel Vetter [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 51e5868a31SEgbert Eich [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 52e5868a31SEgbert Eich [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 53e5868a31SEgbert Eich [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 54e5868a31SEgbert Eich }; 55e5868a31SEgbert Eich 56e5868a31SEgbert Eich static const u32 hpd_mask_i915[] = { 57e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_EN, 58e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 59e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 60e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 61e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 62e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 63e5868a31SEgbert Eich }; 64e5868a31SEgbert Eich 65e5868a31SEgbert Eich static const u32 hpd_status_gen4[] = { 66e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 67e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 68e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 69e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 70e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 71e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 72e5868a31SEgbert Eich }; 73e5868a31SEgbert Eich 74e5868a31SEgbert Eich static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 75e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 76e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 77e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 78e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 79e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 80e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 81e5868a31SEgbert Eich }; 82e5868a31SEgbert Eich 83036a4a7dSZhenyu Wang /* For display hotplug interrupt */ 84995b6762SChris Wilson static void 85f2b115e6SAdam Jackson ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 86036a4a7dSZhenyu Wang { 874bc9d430SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 884bc9d430SDaniel Vetter 89c67a470bSPaulo Zanoni if (dev_priv->pc8.irqs_disabled) { 90c67a470bSPaulo Zanoni WARN(1, "IRQs disabled\n"); 91c67a470bSPaulo Zanoni dev_priv->pc8.regsave.deimr &= ~mask; 92c67a470bSPaulo Zanoni return; 93c67a470bSPaulo Zanoni } 94c67a470bSPaulo Zanoni 951ec14ad3SChris Wilson if ((dev_priv->irq_mask & mask) != 0) { 961ec14ad3SChris Wilson dev_priv->irq_mask &= ~mask; 971ec14ad3SChris 
Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 983143a2bfSChris Wilson POSTING_READ(DEIMR); 99036a4a7dSZhenyu Wang } 100036a4a7dSZhenyu Wang } 101036a4a7dSZhenyu Wang 1020ff9800aSPaulo Zanoni static void 103f2b115e6SAdam Jackson ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 104036a4a7dSZhenyu Wang { 1054bc9d430SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 1064bc9d430SDaniel Vetter 107c67a470bSPaulo Zanoni if (dev_priv->pc8.irqs_disabled) { 108c67a470bSPaulo Zanoni WARN(1, "IRQs disabled\n"); 109c67a470bSPaulo Zanoni dev_priv->pc8.regsave.deimr |= mask; 110c67a470bSPaulo Zanoni return; 111c67a470bSPaulo Zanoni } 112c67a470bSPaulo Zanoni 1131ec14ad3SChris Wilson if ((dev_priv->irq_mask & mask) != mask) { 1141ec14ad3SChris Wilson dev_priv->irq_mask |= mask; 1151ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 1163143a2bfSChris Wilson POSTING_READ(DEIMR); 117036a4a7dSZhenyu Wang } 118036a4a7dSZhenyu Wang } 119036a4a7dSZhenyu Wang 12043eaea13SPaulo Zanoni /** 12143eaea13SPaulo Zanoni * ilk_update_gt_irq - update GTIMR 12243eaea13SPaulo Zanoni * @dev_priv: driver private 12343eaea13SPaulo Zanoni * @interrupt_mask: mask of interrupt bits to update 12443eaea13SPaulo Zanoni * @enabled_irq_mask: mask of interrupt bits to enable 12543eaea13SPaulo Zanoni */ 12643eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 12743eaea13SPaulo Zanoni uint32_t interrupt_mask, 12843eaea13SPaulo Zanoni uint32_t enabled_irq_mask) 12943eaea13SPaulo Zanoni { 13043eaea13SPaulo Zanoni assert_spin_locked(&dev_priv->irq_lock); 13143eaea13SPaulo Zanoni 132c67a470bSPaulo Zanoni if (dev_priv->pc8.irqs_disabled) { 133c67a470bSPaulo Zanoni WARN(1, "IRQs disabled\n"); 134c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; 135c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & 136c67a470bSPaulo Zanoni interrupt_mask); 137c67a470bSPaulo Zanoni return; 138c67a470bSPaulo Zanoni } 139c67a470bSPaulo Zanoni 14043eaea13SPaulo Zanoni dev_priv->gt_irq_mask &= ~interrupt_mask; 14143eaea13SPaulo Zanoni dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 14243eaea13SPaulo Zanoni I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 14343eaea13SPaulo Zanoni POSTING_READ(GTIMR); 14443eaea13SPaulo Zanoni } 14543eaea13SPaulo Zanoni 14643eaea13SPaulo Zanoni void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 14743eaea13SPaulo Zanoni { 14843eaea13SPaulo Zanoni ilk_update_gt_irq(dev_priv, mask, mask); 14943eaea13SPaulo Zanoni } 15043eaea13SPaulo Zanoni 15143eaea13SPaulo Zanoni void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 15243eaea13SPaulo Zanoni { 15343eaea13SPaulo Zanoni ilk_update_gt_irq(dev_priv, mask, 0); 15443eaea13SPaulo Zanoni } 15543eaea13SPaulo Zanoni 156edbfdb45SPaulo Zanoni /** 157edbfdb45SPaulo Zanoni * snb_update_pm_irq - update GEN6_PMIMR 158edbfdb45SPaulo Zanoni * @dev_priv: driver private 159edbfdb45SPaulo Zanoni * @interrupt_mask: mask of interrupt bits to update 160edbfdb45SPaulo Zanoni * @enabled_irq_mask: mask of interrupt bits to enable 161edbfdb45SPaulo Zanoni */ 162edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 163edbfdb45SPaulo Zanoni uint32_t interrupt_mask, 164edbfdb45SPaulo Zanoni uint32_t enabled_irq_mask) 165edbfdb45SPaulo Zanoni { 166605cd25bSPaulo Zanoni uint32_t new_val; 167edbfdb45SPaulo Zanoni 168edbfdb45SPaulo Zanoni assert_spin_locked(&dev_priv->irq_lock); 169edbfdb45SPaulo Zanoni 170c67a470bSPaulo Zanoni if 
(dev_priv->pc8.irqs_disabled) { 171c67a470bSPaulo Zanoni WARN(1, "IRQs disabled\n"); 172c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; 173c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & 174c67a470bSPaulo Zanoni interrupt_mask); 175c67a470bSPaulo Zanoni return; 176c67a470bSPaulo Zanoni } 177c67a470bSPaulo Zanoni 178605cd25bSPaulo Zanoni new_val = dev_priv->pm_irq_mask; 179f52ecbcfSPaulo Zanoni new_val &= ~interrupt_mask; 180f52ecbcfSPaulo Zanoni new_val |= (~enabled_irq_mask & interrupt_mask); 181f52ecbcfSPaulo Zanoni 182605cd25bSPaulo Zanoni if (new_val != dev_priv->pm_irq_mask) { 183605cd25bSPaulo Zanoni dev_priv->pm_irq_mask = new_val; 184605cd25bSPaulo Zanoni I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 185edbfdb45SPaulo Zanoni POSTING_READ(GEN6_PMIMR); 186edbfdb45SPaulo Zanoni } 187f52ecbcfSPaulo Zanoni } 188edbfdb45SPaulo Zanoni 189edbfdb45SPaulo Zanoni void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 190edbfdb45SPaulo Zanoni { 191edbfdb45SPaulo Zanoni snb_update_pm_irq(dev_priv, mask, mask); 192edbfdb45SPaulo Zanoni } 193edbfdb45SPaulo Zanoni 194edbfdb45SPaulo Zanoni void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 195edbfdb45SPaulo Zanoni { 196edbfdb45SPaulo Zanoni snb_update_pm_irq(dev_priv, mask, 0); 197edbfdb45SPaulo Zanoni } 198edbfdb45SPaulo Zanoni 1998664281bSPaulo Zanoni static bool ivb_can_enable_err_int(struct drm_device *dev) 2008664281bSPaulo Zanoni { 2018664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 2028664281bSPaulo Zanoni struct intel_crtc *crtc; 2038664281bSPaulo Zanoni enum pipe pipe; 2048664281bSPaulo Zanoni 2054bc9d430SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 2064bc9d430SDaniel Vetter 2078664281bSPaulo Zanoni for_each_pipe(pipe) { 2088664281bSPaulo Zanoni crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2098664281bSPaulo Zanoni 2108664281bSPaulo Zanoni if (crtc->cpu_fifo_underrun_disabled) 2118664281bSPaulo Zanoni return false; 2128664281bSPaulo Zanoni } 2138664281bSPaulo Zanoni 2148664281bSPaulo Zanoni return true; 2158664281bSPaulo Zanoni } 2168664281bSPaulo Zanoni 2178664281bSPaulo Zanoni static bool cpt_can_enable_serr_int(struct drm_device *dev) 2188664281bSPaulo Zanoni { 2198664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 2208664281bSPaulo Zanoni enum pipe pipe; 2218664281bSPaulo Zanoni struct intel_crtc *crtc; 2228664281bSPaulo Zanoni 223fee884edSDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 224fee884edSDaniel Vetter 2258664281bSPaulo Zanoni for_each_pipe(pipe) { 2268664281bSPaulo Zanoni crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2278664281bSPaulo Zanoni 2288664281bSPaulo Zanoni if (crtc->pch_fifo_underrun_disabled) 2298664281bSPaulo Zanoni return false; 2308664281bSPaulo Zanoni } 2318664281bSPaulo Zanoni 2328664281bSPaulo Zanoni return true; 2338664281bSPaulo Zanoni } 2348664281bSPaulo Zanoni 2358664281bSPaulo Zanoni static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 2368664281bSPaulo Zanoni enum pipe pipe, bool enable) 2378664281bSPaulo Zanoni { 2388664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 2398664281bSPaulo Zanoni uint32_t bit = (pipe == PIPE_A) ? 
DE_PIPEA_FIFO_UNDERRUN : 2408664281bSPaulo Zanoni DE_PIPEB_FIFO_UNDERRUN; 2418664281bSPaulo Zanoni 2428664281bSPaulo Zanoni if (enable) 2438664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 2448664281bSPaulo Zanoni else 2458664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 2468664281bSPaulo Zanoni } 2478664281bSPaulo Zanoni 2488664281bSPaulo Zanoni static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 2497336df65SDaniel Vetter enum pipe pipe, bool enable) 2508664281bSPaulo Zanoni { 2518664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 2528664281bSPaulo Zanoni if (enable) { 2537336df65SDaniel Vetter I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 2547336df65SDaniel Vetter 2558664281bSPaulo Zanoni if (!ivb_can_enable_err_int(dev)) 2568664281bSPaulo Zanoni return; 2578664281bSPaulo Zanoni 2588664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 2598664281bSPaulo Zanoni } else { 2607336df65SDaniel Vetter bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); 2617336df65SDaniel Vetter 2627336df65SDaniel Vetter /* Change the state _after_ we've read out the current one. */ 2638664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 2647336df65SDaniel Vetter 2657336df65SDaniel Vetter if (!was_enabled && 2667336df65SDaniel Vetter (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { 2677336df65SDaniel Vetter DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", 2687336df65SDaniel Vetter pipe_name(pipe)); 2697336df65SDaniel Vetter } 2708664281bSPaulo Zanoni } 2718664281bSPaulo Zanoni } 2728664281bSPaulo Zanoni 273fee884edSDaniel Vetter /** 274fee884edSDaniel Vetter * ibx_display_interrupt_update - update SDEIMR 275fee884edSDaniel Vetter * @dev_priv: driver private 276fee884edSDaniel Vetter * @interrupt_mask: mask of interrupt bits to update 277fee884edSDaniel Vetter * @enabled_irq_mask: mask of interrupt bits to enable 278fee884edSDaniel Vetter */ 279fee884edSDaniel Vetter static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 280fee884edSDaniel Vetter uint32_t interrupt_mask, 281fee884edSDaniel Vetter uint32_t enabled_irq_mask) 282fee884edSDaniel Vetter { 283fee884edSDaniel Vetter uint32_t sdeimr = I915_READ(SDEIMR); 284fee884edSDaniel Vetter sdeimr &= ~interrupt_mask; 285fee884edSDaniel Vetter sdeimr |= (~enabled_irq_mask & interrupt_mask); 286fee884edSDaniel Vetter 287fee884edSDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 288fee884edSDaniel Vetter 289c67a470bSPaulo Zanoni if (dev_priv->pc8.irqs_disabled && 290c67a470bSPaulo Zanoni (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { 291c67a470bSPaulo Zanoni WARN(1, "IRQs disabled\n"); 292c67a470bSPaulo Zanoni dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; 293c67a470bSPaulo Zanoni dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & 294c67a470bSPaulo Zanoni interrupt_mask); 295c67a470bSPaulo Zanoni return; 296c67a470bSPaulo Zanoni } 297c67a470bSPaulo Zanoni 298fee884edSDaniel Vetter I915_WRITE(SDEIMR, sdeimr); 299fee884edSDaniel Vetter POSTING_READ(SDEIMR); 300fee884edSDaniel Vetter } 301fee884edSDaniel Vetter #define ibx_enable_display_interrupt(dev_priv, bits) \ 302fee884edSDaniel Vetter ibx_display_interrupt_update((dev_priv), (bits), (bits)) 303fee884edSDaniel Vetter #define ibx_disable_display_interrupt(dev_priv, bits) \ 304fee884edSDaniel Vetter ibx_display_interrupt_update((dev_priv), (bits), 0) 305fee884edSDaniel Vetter 306de28075dSDaniel Vetter static void 
ibx_set_fifo_underrun_reporting(struct drm_device *dev,
307de28075dSDaniel Vetter 					    enum transcoder pch_transcoder,
3088664281bSPaulo Zanoni 					    bool enable)
3098664281bSPaulo Zanoni {
3108664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
311de28075dSDaniel Vetter 	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
312de28075dSDaniel Vetter 		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
3138664281bSPaulo Zanoni 
3148664281bSPaulo Zanoni 	if (enable)
315fee884edSDaniel Vetter 		ibx_enable_display_interrupt(dev_priv, bit);
3168664281bSPaulo Zanoni 	else
317fee884edSDaniel Vetter 		ibx_disable_display_interrupt(dev_priv, bit);
3188664281bSPaulo Zanoni }
3198664281bSPaulo Zanoni 
3208664281bSPaulo Zanoni static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
3218664281bSPaulo Zanoni 					    enum transcoder pch_transcoder,
3228664281bSPaulo Zanoni 					    bool enable)
3238664281bSPaulo Zanoni {
3248664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
3258664281bSPaulo Zanoni 
3268664281bSPaulo Zanoni 	if (enable) {
3271dd246fbSDaniel Vetter 		I915_WRITE(SERR_INT,
3281dd246fbSDaniel Vetter 			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
3291dd246fbSDaniel Vetter 
3308664281bSPaulo Zanoni 		if (!cpt_can_enable_serr_int(dev))
3318664281bSPaulo Zanoni 			return;
3328664281bSPaulo Zanoni 
333fee884edSDaniel Vetter 		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
3348664281bSPaulo Zanoni 	} else {
3351dd246fbSDaniel Vetter 		uint32_t tmp = I915_READ(SERR_INT);
3361dd246fbSDaniel Vetter 		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
3371dd246fbSDaniel Vetter 
3381dd246fbSDaniel Vetter 		/* Change the state _after_ we've read out the current one. */
339fee884edSDaniel Vetter 		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
3401dd246fbSDaniel Vetter 
3411dd246fbSDaniel Vetter 		if (!was_enabled &&
3421dd246fbSDaniel Vetter 		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
3431dd246fbSDaniel Vetter 			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
3441dd246fbSDaniel Vetter 				      transcoder_name(pch_transcoder));
3451dd246fbSDaniel Vetter 		}
3468664281bSPaulo Zanoni 	}
3478664281bSPaulo Zanoni }
3488664281bSPaulo Zanoni 
3498664281bSPaulo Zanoni /**
3508664281bSPaulo Zanoni  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
3518664281bSPaulo Zanoni  * @dev: drm device
3528664281bSPaulo Zanoni  * @pipe: pipe
3538664281bSPaulo Zanoni  * @enable: true if we want to report FIFO underrun errors, false otherwise
3548664281bSPaulo Zanoni  *
3558664281bSPaulo Zanoni  * This function makes us disable or enable CPU fifo underruns for a specific
3568664281bSPaulo Zanoni  * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
3578664281bSPaulo Zanoni  * reporting for one pipe may also disable all the other CPU error interrupts for
3588664281bSPaulo Zanoni  * the other pipes, due to the fact that there's just one interrupt mask/enable
3598664281bSPaulo Zanoni  * bit for all the pipes.
3608664281bSPaulo Zanoni  *
3618664281bSPaulo Zanoni  * Returns the previous state of underrun reporting.
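 *
 * Illustrative usage sketch (the caller shown here is assumed, not taken
 * from this file): temporarily silence underrun reporting around an
 * operation known to cause harmless underruns, then restore the previous
 * state returned by the first call:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... reprogram the pipe ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);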
3628664281bSPaulo Zanoni  */
3638664281bSPaulo Zanoni bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
3648664281bSPaulo Zanoni 					   enum pipe pipe, bool enable)
3658664281bSPaulo Zanoni {
3668664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
3678664281bSPaulo Zanoni 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
3688664281bSPaulo Zanoni 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3698664281bSPaulo Zanoni 	unsigned long flags;
3708664281bSPaulo Zanoni 	bool ret;
3718664281bSPaulo Zanoni 
3728664281bSPaulo Zanoni 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
3738664281bSPaulo Zanoni 
3748664281bSPaulo Zanoni 	ret = !intel_crtc->cpu_fifo_underrun_disabled;
3758664281bSPaulo Zanoni 
3768664281bSPaulo Zanoni 	if (enable == ret)
3778664281bSPaulo Zanoni 		goto done;
3788664281bSPaulo Zanoni 
3798664281bSPaulo Zanoni 	intel_crtc->cpu_fifo_underrun_disabled = !enable;
3808664281bSPaulo Zanoni 
3818664281bSPaulo Zanoni 	if (IS_GEN5(dev) || IS_GEN6(dev))
3828664281bSPaulo Zanoni 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
3838664281bSPaulo Zanoni 	else if (IS_GEN7(dev))
3847336df65SDaniel Vetter 		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
3858664281bSPaulo Zanoni 
3868664281bSPaulo Zanoni done:
3878664281bSPaulo Zanoni 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
3888664281bSPaulo Zanoni 	return ret;
3898664281bSPaulo Zanoni }
3908664281bSPaulo Zanoni 
3918664281bSPaulo Zanoni /**
3928664281bSPaulo Zanoni  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
3938664281bSPaulo Zanoni  * @dev: drm device
3948664281bSPaulo Zanoni  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
3958664281bSPaulo Zanoni  * @enable: true if we want to report FIFO underrun errors, false otherwise
3968664281bSPaulo Zanoni  *
3978664281bSPaulo Zanoni  * This function makes us disable or enable PCH fifo underruns for a specific
3988664281bSPaulo Zanoni  * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
3998664281bSPaulo Zanoni  * underrun reporting for one transcoder may also disable all the other PCH
4008664281bSPaulo Zanoni  * error interrupts for the other transcoders, due to the fact that there's just
4018664281bSPaulo Zanoni  * one interrupt mask/enable bit for all the transcoders.
4028664281bSPaulo Zanoni  *
4038664281bSPaulo Zanoni  * Returns the previous state of underrun reporting.
4048664281bSPaulo Zanoni  */
4058664281bSPaulo Zanoni bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
4068664281bSPaulo Zanoni 					   enum transcoder pch_transcoder,
4078664281bSPaulo Zanoni 					   bool enable)
4088664281bSPaulo Zanoni {
4098664281bSPaulo Zanoni 	struct drm_i915_private *dev_priv = dev->dev_private;
410de28075dSDaniel Vetter 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
411de28075dSDaniel Vetter 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4128664281bSPaulo Zanoni 	unsigned long flags;
4138664281bSPaulo Zanoni 	bool ret;
4148664281bSPaulo Zanoni 
415de28075dSDaniel Vetter 	/*
416de28075dSDaniel Vetter 	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
417de28075dSDaniel Vetter 	 * has only one pch transcoder A that all pipes can use. To avoid racy
418de28075dSDaniel Vetter 	 * pch transcoder -> pipe lookups from interrupt code simply store the
419de28075dSDaniel Vetter 	 * underrun statistics in crtc A.
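	 * (Concretely: the pipe_to_crtc_mapping[pch_transcoder] lookup above
	 * leans on that fixed pipe A -> transcoder A, pipe B -> transcoder B
	 * relationship on pre-LPT PCHs; on LPT it simply resolves to crtc A,
	 * since transcoder A is the only PCH transcoder there.)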
Since we never expose this anywhere 420de28075dSDaniel Vetter * nor use it outside of the fifo underrun code here using the "wrong" 421de28075dSDaniel Vetter * crtc on LPT won't cause issues. 422de28075dSDaniel Vetter */ 4238664281bSPaulo Zanoni 4248664281bSPaulo Zanoni spin_lock_irqsave(&dev_priv->irq_lock, flags); 4258664281bSPaulo Zanoni 4268664281bSPaulo Zanoni ret = !intel_crtc->pch_fifo_underrun_disabled; 4278664281bSPaulo Zanoni 4288664281bSPaulo Zanoni if (enable == ret) 4298664281bSPaulo Zanoni goto done; 4308664281bSPaulo Zanoni 4318664281bSPaulo Zanoni intel_crtc->pch_fifo_underrun_disabled = !enable; 4328664281bSPaulo Zanoni 4338664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) 434de28075dSDaniel Vetter ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 4358664281bSPaulo Zanoni else 4368664281bSPaulo Zanoni cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 4378664281bSPaulo Zanoni 4388664281bSPaulo Zanoni done: 4398664281bSPaulo Zanoni spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 4408664281bSPaulo Zanoni return ret; 4418664281bSPaulo Zanoni } 4428664281bSPaulo Zanoni 4438664281bSPaulo Zanoni 4447c463586SKeith Packard void 4457c463586SKeith Packard i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 4467c463586SKeith Packard { 4479db4a9c7SJesse Barnes u32 reg = PIPESTAT(pipe); 44846c06a30SVille Syrjälä u32 pipestat = I915_READ(reg) & 0x7fff0000; 4497c463586SKeith Packard 450b79480baSDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 451b79480baSDaniel Vetter 45246c06a30SVille Syrjälä if ((pipestat & mask) == mask) 45346c06a30SVille Syrjälä return; 45446c06a30SVille Syrjälä 4557c463586SKeith Packard /* Enable the interrupt, clear any pending status */ 45646c06a30SVille Syrjälä pipestat |= mask | (mask >> 16); 45746c06a30SVille Syrjälä I915_WRITE(reg, pipestat); 4583143a2bfSChris Wilson POSTING_READ(reg); 4597c463586SKeith Packard } 4607c463586SKeith Packard 4617c463586SKeith Packard void 4627c463586SKeith Packard i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 4637c463586SKeith Packard { 4649db4a9c7SJesse Barnes u32 reg = PIPESTAT(pipe); 46546c06a30SVille Syrjälä u32 pipestat = I915_READ(reg) & 0x7fff0000; 4667c463586SKeith Packard 467b79480baSDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 468b79480baSDaniel Vetter 46946c06a30SVille Syrjälä if ((pipestat & mask) == 0) 47046c06a30SVille Syrjälä return; 47146c06a30SVille Syrjälä 47246c06a30SVille Syrjälä pipestat &= ~mask; 47346c06a30SVille Syrjälä I915_WRITE(reg, pipestat); 4743143a2bfSChris Wilson POSTING_READ(reg); 4757c463586SKeith Packard } 4767c463586SKeith Packard 477c0e09200SDave Airlie /** 478f49e38ddSJani Nikula * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 47901c66889SZhao Yakui */ 480f49e38ddSJani Nikula static void i915_enable_asle_pipestat(struct drm_device *dev) 48101c66889SZhao Yakui { 4821ec14ad3SChris Wilson drm_i915_private_t *dev_priv = dev->dev_private; 4831ec14ad3SChris Wilson unsigned long irqflags; 4841ec14ad3SChris Wilson 485f49e38ddSJani Nikula if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 486f49e38ddSJani Nikula return; 487f49e38ddSJani Nikula 4881ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 48901c66889SZhao Yakui 490f898780bSJani Nikula i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE); 491a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) 492f898780bSJani Nikula i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); 4931ec14ad3SChris Wilson 
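	/*
	 * Note on the i915_enable_pipestat()/i915_disable_pipestat()
	 * helpers used above: PIPESTAT keeps the enable bits in the upper
	 * half of the register and the matching status bits 16 positions
	 * lower, which is why the helpers write "mask | (mask >> 16)" --
	 * enabling the event and acking any stale status in one write.
	 * Illustrative example: the enable bit at position 22
	 * (PIPE_LEGACY_BLC_EVENT_ENABLE) pairs with a status bit at
	 * position 6.
	 */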
4941ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 49501c66889SZhao Yakui } 49601c66889SZhao Yakui 49701c66889SZhao Yakui /** 4980a3e67a4SJesse Barnes * i915_pipe_enabled - check if a pipe is enabled 4990a3e67a4SJesse Barnes * @dev: DRM device 5000a3e67a4SJesse Barnes * @pipe: pipe to check 5010a3e67a4SJesse Barnes * 5020a3e67a4SJesse Barnes * Reading certain registers when the pipe is disabled can hang the chip. 5030a3e67a4SJesse Barnes * Use this routine to make sure the PLL is running and the pipe is active 5040a3e67a4SJesse Barnes * before reading such registers if unsure. 5050a3e67a4SJesse Barnes */ 5060a3e67a4SJesse Barnes static int 5070a3e67a4SJesse Barnes i915_pipe_enabled(struct drm_device *dev, int pipe) 5080a3e67a4SJesse Barnes { 5090a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 510702e7a56SPaulo Zanoni 511a01025afSDaniel Vetter if (drm_core_check_feature(dev, DRIVER_MODESET)) { 512a01025afSDaniel Vetter /* Locking is horribly broken here, but whatever. */ 513a01025afSDaniel Vetter struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 514a01025afSDaniel Vetter struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 51571f8ba6bSPaulo Zanoni 516a01025afSDaniel Vetter return intel_crtc->active; 517a01025afSDaniel Vetter } else { 518a01025afSDaniel Vetter return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 519a01025afSDaniel Vetter } 5200a3e67a4SJesse Barnes } 5210a3e67a4SJesse Barnes 5224cdb83ecSVille Syrjälä static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 5234cdb83ecSVille Syrjälä { 5244cdb83ecSVille Syrjälä /* Gen2 doesn't have a hardware frame counter */ 5254cdb83ecSVille Syrjälä return 0; 5264cdb83ecSVille Syrjälä } 5274cdb83ecSVille Syrjälä 52842f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which 52942f52ef8SKeith Packard * we use as a pipe index 53042f52ef8SKeith Packard */ 531f71d4af4SJesse Barnes static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 5320a3e67a4SJesse Barnes { 5330a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 5340a3e67a4SJesse Barnes unsigned long high_frame; 5350a3e67a4SJesse Barnes unsigned long low_frame; 536391f75e2SVille Syrjälä u32 high1, high2, low, pixel, vbl_start; 5370a3e67a4SJesse Barnes 5380a3e67a4SJesse Barnes if (!i915_pipe_enabled(dev, pipe)) { 53944d98a61SZhao Yakui DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 5409db4a9c7SJesse Barnes "pipe %c\n", pipe_name(pipe)); 5410a3e67a4SJesse Barnes return 0; 5420a3e67a4SJesse Barnes } 5430a3e67a4SJesse Barnes 544391f75e2SVille Syrjälä if (drm_core_check_feature(dev, DRIVER_MODESET)) { 545391f75e2SVille Syrjälä struct intel_crtc *intel_crtc = 546391f75e2SVille Syrjälä to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 547391f75e2SVille Syrjälä const struct drm_display_mode *mode = 548391f75e2SVille Syrjälä &intel_crtc->config.adjusted_mode; 549391f75e2SVille Syrjälä 550391f75e2SVille Syrjälä vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; 551391f75e2SVille Syrjälä } else { 552391f75e2SVille Syrjälä enum transcoder cpu_transcoder = 553391f75e2SVille Syrjälä intel_pipe_to_cpu_transcoder(dev_priv, pipe); 554391f75e2SVille Syrjälä u32 htotal; 555391f75e2SVille Syrjälä 556391f75e2SVille Syrjälä htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 557391f75e2SVille Syrjälä vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; 558391f75e2SVille Syrjälä 559391f75e2SVille 
Syrjälä vbl_start *= htotal; 560391f75e2SVille Syrjälä } 561391f75e2SVille Syrjälä 5629db4a9c7SJesse Barnes high_frame = PIPEFRAME(pipe); 5639db4a9c7SJesse Barnes low_frame = PIPEFRAMEPIXEL(pipe); 5645eddb70bSChris Wilson 5650a3e67a4SJesse Barnes /* 5660a3e67a4SJesse Barnes * High & low register fields aren't synchronized, so make sure 5670a3e67a4SJesse Barnes * we get a low value that's stable across two reads of the high 5680a3e67a4SJesse Barnes * register. 5690a3e67a4SJesse Barnes */ 5700a3e67a4SJesse Barnes do { 5715eddb70bSChris Wilson high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 572391f75e2SVille Syrjälä low = I915_READ(low_frame); 5735eddb70bSChris Wilson high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 5740a3e67a4SJesse Barnes } while (high1 != high2); 5750a3e67a4SJesse Barnes 5765eddb70bSChris Wilson high1 >>= PIPE_FRAME_HIGH_SHIFT; 577391f75e2SVille Syrjälä pixel = low & PIPE_PIXEL_MASK; 5785eddb70bSChris Wilson low >>= PIPE_FRAME_LOW_SHIFT; 579391f75e2SVille Syrjälä 580391f75e2SVille Syrjälä /* 581391f75e2SVille Syrjälä * The frame counter increments at beginning of active. 582391f75e2SVille Syrjälä * Cook up a vblank counter by also checking the pixel 583391f75e2SVille Syrjälä * counter against vblank start. 584391f75e2SVille Syrjälä */ 585391f75e2SVille Syrjälä return ((high1 << 8) | low) + (pixel >= vbl_start); 5860a3e67a4SJesse Barnes } 5870a3e67a4SJesse Barnes 588f71d4af4SJesse Barnes static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 5899880b7a5SJesse Barnes { 5909880b7a5SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 5919db4a9c7SJesse Barnes int reg = PIPE_FRMCOUNT_GM45(pipe); 5929880b7a5SJesse Barnes 5939880b7a5SJesse Barnes if (!i915_pipe_enabled(dev, pipe)) { 59444d98a61SZhao Yakui DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 5959db4a9c7SJesse Barnes "pipe %c\n", pipe_name(pipe)); 5969880b7a5SJesse Barnes return 0; 5979880b7a5SJesse Barnes } 5989880b7a5SJesse Barnes 5999880b7a5SJesse Barnes return I915_READ(reg); 6009880b7a5SJesse Barnes } 6019880b7a5SJesse Barnes 6027c06b08aSVille Syrjälä static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe) 60354ddcbd2SVille Syrjälä { 60454ddcbd2SVille Syrjälä struct drm_i915_private *dev_priv = dev->dev_private; 60554ddcbd2SVille Syrjälä uint32_t status; 60654ddcbd2SVille Syrjälä 60754ddcbd2SVille Syrjälä if (IS_VALLEYVIEW(dev)) { 60854ddcbd2SVille Syrjälä status = pipe == PIPE_A ? 60954ddcbd2SVille Syrjälä I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : 61054ddcbd2SVille Syrjälä I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 61154ddcbd2SVille Syrjälä 61254ddcbd2SVille Syrjälä return I915_READ(VLV_ISR) & status; 6137c06b08aSVille Syrjälä } else if (IS_GEN2(dev)) { 6147c06b08aSVille Syrjälä status = pipe == PIPE_A ? 6157c06b08aSVille Syrjälä I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : 6167c06b08aSVille Syrjälä I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 6177c06b08aSVille Syrjälä 6187c06b08aSVille Syrjälä return I915_READ16(ISR) & status; 6197c06b08aSVille Syrjälä } else if (INTEL_INFO(dev)->gen < 5) { 62054ddcbd2SVille Syrjälä status = pipe == PIPE_A ? 62154ddcbd2SVille Syrjälä I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : 62254ddcbd2SVille Syrjälä I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 62354ddcbd2SVille Syrjälä 62454ddcbd2SVille Syrjälä return I915_READ(ISR) & status; 62554ddcbd2SVille Syrjälä } else if (INTEL_INFO(dev)->gen < 7) { 62654ddcbd2SVille Syrjälä status = pipe == PIPE_A ? 
62754ddcbd2SVille Syrjälä DE_PIPEA_VBLANK : 62854ddcbd2SVille Syrjälä DE_PIPEB_VBLANK; 62954ddcbd2SVille Syrjälä 63054ddcbd2SVille Syrjälä return I915_READ(DEISR) & status; 63154ddcbd2SVille Syrjälä } else { 63254ddcbd2SVille Syrjälä switch (pipe) { 63354ddcbd2SVille Syrjälä default: 63454ddcbd2SVille Syrjälä case PIPE_A: 63554ddcbd2SVille Syrjälä status = DE_PIPEA_VBLANK_IVB; 63654ddcbd2SVille Syrjälä break; 63754ddcbd2SVille Syrjälä case PIPE_B: 63854ddcbd2SVille Syrjälä status = DE_PIPEB_VBLANK_IVB; 63954ddcbd2SVille Syrjälä break; 64054ddcbd2SVille Syrjälä case PIPE_C: 64154ddcbd2SVille Syrjälä status = DE_PIPEC_VBLANK_IVB; 64254ddcbd2SVille Syrjälä break; 64354ddcbd2SVille Syrjälä } 64454ddcbd2SVille Syrjälä 64554ddcbd2SVille Syrjälä return I915_READ(DEISR) & status; 64654ddcbd2SVille Syrjälä } 64754ddcbd2SVille Syrjälä } 64854ddcbd2SVille Syrjälä 649f71d4af4SJesse Barnes static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 6500af7e4dfSMario Kleiner int *vpos, int *hpos) 6510af7e4dfSMario Kleiner { 652c2baf4b7SVille Syrjälä struct drm_i915_private *dev_priv = dev->dev_private; 653c2baf4b7SVille Syrjälä struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 654c2baf4b7SVille Syrjälä struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 655c2baf4b7SVille Syrjälä const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 6563aa18df8SVille Syrjälä int position; 6570af7e4dfSMario Kleiner int vbl_start, vbl_end, htotal, vtotal; 6580af7e4dfSMario Kleiner bool in_vbl = true; 6590af7e4dfSMario Kleiner int ret = 0; 6600af7e4dfSMario Kleiner 661c2baf4b7SVille Syrjälä if (!intel_crtc->active) { 6620af7e4dfSMario Kleiner DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 6639db4a9c7SJesse Barnes "pipe %c\n", pipe_name(pipe)); 6640af7e4dfSMario Kleiner return 0; 6650af7e4dfSMario Kleiner } 6660af7e4dfSMario Kleiner 667c2baf4b7SVille Syrjälä htotal = mode->crtc_htotal; 668c2baf4b7SVille Syrjälä vtotal = mode->crtc_vtotal; 669c2baf4b7SVille Syrjälä vbl_start = mode->crtc_vblank_start; 670c2baf4b7SVille Syrjälä vbl_end = mode->crtc_vblank_end; 6710af7e4dfSMario Kleiner 672c2baf4b7SVille Syrjälä ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 673c2baf4b7SVille Syrjälä 6747c06b08aSVille Syrjälä if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 6750af7e4dfSMario Kleiner /* No obvious pixelcount register. Only query vertical 6760af7e4dfSMario Kleiner * scanout position from Display scan line register. 6770af7e4dfSMario Kleiner */ 6787c06b08aSVille Syrjälä if (IS_GEN2(dev)) 6797c06b08aSVille Syrjälä position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 6807c06b08aSVille Syrjälä else 6817c06b08aSVille Syrjälä position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 68254ddcbd2SVille Syrjälä 68354ddcbd2SVille Syrjälä /* 68454ddcbd2SVille Syrjälä * The scanline counter increments at the leading edge 68554ddcbd2SVille Syrjälä * of hsync, ie. it completely misses the active portion 68654ddcbd2SVille Syrjälä * of the line. Fix up the counter at both edges of vblank 68754ddcbd2SVille Syrjälä * to get a more accurate picture whether we're in vblank 68854ddcbd2SVille Syrjälä * or not. 
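		 *
		 * Worked example (hypothetical numbers, not from any real
		 * mode): with vtotal = 806 and vbl_start = 768, the hardware
		 * may still report scanline 767 while it already signals
		 * "in vblank"; the check below bumps that reading to
		 * (767 + 1) % 806 = 768 so the returned position is
		 * consistent with the vblank state.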
68954ddcbd2SVille Syrjälä */ 6907c06b08aSVille Syrjälä in_vbl = intel_pipe_in_vblank(dev, pipe); 69154ddcbd2SVille Syrjälä if ((in_vbl && position == vbl_start - 1) || 69254ddcbd2SVille Syrjälä (!in_vbl && position == vbl_end - 1)) 69354ddcbd2SVille Syrjälä position = (position + 1) % vtotal; 6940af7e4dfSMario Kleiner } else { 6950af7e4dfSMario Kleiner /* Have access to pixelcount since start of frame. 6960af7e4dfSMario Kleiner * We can split this into vertical and horizontal 6970af7e4dfSMario Kleiner * scanout position. 6980af7e4dfSMario Kleiner */ 6990af7e4dfSMario Kleiner position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 7000af7e4dfSMario Kleiner 7013aa18df8SVille Syrjälä /* convert to pixel counts */ 7023aa18df8SVille Syrjälä vbl_start *= htotal; 7033aa18df8SVille Syrjälä vbl_end *= htotal; 7043aa18df8SVille Syrjälä vtotal *= htotal; 7053aa18df8SVille Syrjälä } 7063aa18df8SVille Syrjälä 7073aa18df8SVille Syrjälä in_vbl = position >= vbl_start && position < vbl_end; 7083aa18df8SVille Syrjälä 7093aa18df8SVille Syrjälä /* 7103aa18df8SVille Syrjälä * While in vblank, position will be negative 7113aa18df8SVille Syrjälä * counting up towards 0 at vbl_end. And outside 7123aa18df8SVille Syrjälä * vblank, position will be positive counting 7133aa18df8SVille Syrjälä * up since vbl_end. 7143aa18df8SVille Syrjälä */ 7153aa18df8SVille Syrjälä if (position >= vbl_start) 7163aa18df8SVille Syrjälä position -= vbl_end; 7173aa18df8SVille Syrjälä else 7183aa18df8SVille Syrjälä position += vtotal - vbl_end; 7193aa18df8SVille Syrjälä 7207c06b08aSVille Syrjälä if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 7213aa18df8SVille Syrjälä *vpos = position; 7223aa18df8SVille Syrjälä *hpos = 0; 7233aa18df8SVille Syrjälä } else { 7240af7e4dfSMario Kleiner *vpos = position / htotal; 7250af7e4dfSMario Kleiner *hpos = position - (*vpos * htotal); 7260af7e4dfSMario Kleiner } 7270af7e4dfSMario Kleiner 7280af7e4dfSMario Kleiner /* In vblank? 
*/ 7290af7e4dfSMario Kleiner if (in_vbl) 7300af7e4dfSMario Kleiner ret |= DRM_SCANOUTPOS_INVBL; 7310af7e4dfSMario Kleiner 7320af7e4dfSMario Kleiner return ret; 7330af7e4dfSMario Kleiner } 7340af7e4dfSMario Kleiner 735f71d4af4SJesse Barnes static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 7360af7e4dfSMario Kleiner int *max_error, 7370af7e4dfSMario Kleiner struct timeval *vblank_time, 7380af7e4dfSMario Kleiner unsigned flags) 7390af7e4dfSMario Kleiner { 7404041b853SChris Wilson struct drm_crtc *crtc; 7410af7e4dfSMario Kleiner 7427eb552aeSBen Widawsky if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 7434041b853SChris Wilson DRM_ERROR("Invalid crtc %d\n", pipe); 7440af7e4dfSMario Kleiner return -EINVAL; 7450af7e4dfSMario Kleiner } 7460af7e4dfSMario Kleiner 7470af7e4dfSMario Kleiner /* Get drm_crtc to timestamp: */ 7484041b853SChris Wilson crtc = intel_get_crtc_for_pipe(dev, pipe); 7494041b853SChris Wilson if (crtc == NULL) { 7504041b853SChris Wilson DRM_ERROR("Invalid crtc %d\n", pipe); 7514041b853SChris Wilson return -EINVAL; 7524041b853SChris Wilson } 7534041b853SChris Wilson 7544041b853SChris Wilson if (!crtc->enabled) { 7554041b853SChris Wilson DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 7564041b853SChris Wilson return -EBUSY; 7574041b853SChris Wilson } 7580af7e4dfSMario Kleiner 7590af7e4dfSMario Kleiner /* Helper routine in DRM core does all the work: */ 7604041b853SChris Wilson return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 7614041b853SChris Wilson vblank_time, flags, 7624041b853SChris Wilson crtc); 7630af7e4dfSMario Kleiner } 7640af7e4dfSMario Kleiner 76567c347ffSJani Nikula static bool intel_hpd_irq_event(struct drm_device *dev, 76667c347ffSJani Nikula struct drm_connector *connector) 767321a1b30SEgbert Eich { 768321a1b30SEgbert Eich enum drm_connector_status old_status; 769321a1b30SEgbert Eich 770321a1b30SEgbert Eich WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 771321a1b30SEgbert Eich old_status = connector->status; 772321a1b30SEgbert Eich 773321a1b30SEgbert Eich connector->status = connector->funcs->detect(connector, false); 77467c347ffSJani Nikula if (old_status == connector->status) 77567c347ffSJani Nikula return false; 77667c347ffSJani Nikula 77767c347ffSJani Nikula DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 778321a1b30SEgbert Eich connector->base.id, 779321a1b30SEgbert Eich drm_get_connector_name(connector), 78067c347ffSJani Nikula drm_get_connector_status_name(old_status), 78167c347ffSJani Nikula drm_get_connector_status_name(connector->status)); 78267c347ffSJani Nikula 78367c347ffSJani Nikula return true; 784321a1b30SEgbert Eich } 785321a1b30SEgbert Eich 7865ca58282SJesse Barnes /* 7875ca58282SJesse Barnes * Handle hotplug events outside the interrupt handler proper. 
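 *
 * Rough flow, summarising the code below: the IRQ-time handler
 * (intel_hpd_irq_handler()) records which pins fired in
 * dev_priv->hpd_event_bits and schedules this work item.  The work
 * function then re-runs ->detect() for the affected connectors under
 * mode_config.mutex; pins that the storm detection marked
 * HPD_MARK_DISABLED are switched over to polling, and
 * hotplug_reenable_timer restores interrupt-driven detection after
 * I915_REENABLE_HOTPLUG_DELAY.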
7885ca58282SJesse Barnes */ 789ac4c16c5SEgbert Eich #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 790ac4c16c5SEgbert Eich 7915ca58282SJesse Barnes static void i915_hotplug_work_func(struct work_struct *work) 7925ca58282SJesse Barnes { 7935ca58282SJesse Barnes drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 7945ca58282SJesse Barnes hotplug_work); 7955ca58282SJesse Barnes struct drm_device *dev = dev_priv->dev; 796c31c4ba3SKeith Packard struct drm_mode_config *mode_config = &dev->mode_config; 797cd569aedSEgbert Eich struct intel_connector *intel_connector; 798cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 799cd569aedSEgbert Eich struct drm_connector *connector; 800cd569aedSEgbert Eich unsigned long irqflags; 801cd569aedSEgbert Eich bool hpd_disabled = false; 802321a1b30SEgbert Eich bool changed = false; 803142e2398SEgbert Eich u32 hpd_event_bits; 8045ca58282SJesse Barnes 80552d7ecedSDaniel Vetter /* HPD irq before everything is fully set up. */ 80652d7ecedSDaniel Vetter if (!dev_priv->enable_hotplug_processing) 80752d7ecedSDaniel Vetter return; 80852d7ecedSDaniel Vetter 809a65e34c7SKeith Packard mutex_lock(&mode_config->mutex); 810e67189abSJesse Barnes DRM_DEBUG_KMS("running encoder hotplug functions\n"); 811e67189abSJesse Barnes 812cd569aedSEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 813142e2398SEgbert Eich 814142e2398SEgbert Eich hpd_event_bits = dev_priv->hpd_event_bits; 815142e2398SEgbert Eich dev_priv->hpd_event_bits = 0; 816cd569aedSEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 817cd569aedSEgbert Eich intel_connector = to_intel_connector(connector); 818cd569aedSEgbert Eich intel_encoder = intel_connector->encoder; 819cd569aedSEgbert Eich if (intel_encoder->hpd_pin > HPD_NONE && 820cd569aedSEgbert Eich dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 821cd569aedSEgbert Eich connector->polled == DRM_CONNECTOR_POLL_HPD) { 822cd569aedSEgbert Eich DRM_INFO("HPD interrupt storm detected on connector %s: " 823cd569aedSEgbert Eich "switching from hotplug detection to polling\n", 824cd569aedSEgbert Eich drm_get_connector_name(connector)); 825cd569aedSEgbert Eich dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 826cd569aedSEgbert Eich connector->polled = DRM_CONNECTOR_POLL_CONNECT 827cd569aedSEgbert Eich | DRM_CONNECTOR_POLL_DISCONNECT; 828cd569aedSEgbert Eich hpd_disabled = true; 829cd569aedSEgbert Eich } 830142e2398SEgbert Eich if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 831142e2398SEgbert Eich DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 832142e2398SEgbert Eich drm_get_connector_name(connector), intel_encoder->hpd_pin); 833142e2398SEgbert Eich } 834cd569aedSEgbert Eich } 835cd569aedSEgbert Eich /* if there were no outputs to poll, poll was disabled, 836cd569aedSEgbert Eich * therefore make sure it's enabled when disabling HPD on 837cd569aedSEgbert Eich * some connectors */ 838ac4c16c5SEgbert Eich if (hpd_disabled) { 839cd569aedSEgbert Eich drm_kms_helper_poll_enable(dev); 840ac4c16c5SEgbert Eich mod_timer(&dev_priv->hotplug_reenable_timer, 841ac4c16c5SEgbert Eich jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 842ac4c16c5SEgbert Eich } 843cd569aedSEgbert Eich 844cd569aedSEgbert Eich spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 845cd569aedSEgbert Eich 846321a1b30SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 847321a1b30SEgbert Eich intel_connector = 
to_intel_connector(connector); 848321a1b30SEgbert Eich intel_encoder = intel_connector->encoder; 849321a1b30SEgbert Eich if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 850cd569aedSEgbert Eich if (intel_encoder->hot_plug) 851cd569aedSEgbert Eich intel_encoder->hot_plug(intel_encoder); 852321a1b30SEgbert Eich if (intel_hpd_irq_event(dev, connector)) 853321a1b30SEgbert Eich changed = true; 854321a1b30SEgbert Eich } 855321a1b30SEgbert Eich } 85640ee3381SKeith Packard mutex_unlock(&mode_config->mutex); 85740ee3381SKeith Packard 858321a1b30SEgbert Eich if (changed) 859321a1b30SEgbert Eich drm_kms_helper_hotplug_event(dev); 8605ca58282SJesse Barnes } 8615ca58282SJesse Barnes 862d0ecd7e2SDaniel Vetter static void ironlake_rps_change_irq_handler(struct drm_device *dev) 863f97108d1SJesse Barnes { 864f97108d1SJesse Barnes drm_i915_private_t *dev_priv = dev->dev_private; 865b5b72e89SMatthew Garrett u32 busy_up, busy_down, max_avg, min_avg; 8669270388eSDaniel Vetter u8 new_delay; 8679270388eSDaniel Vetter 868d0ecd7e2SDaniel Vetter spin_lock(&mchdev_lock); 869f97108d1SJesse Barnes 87073edd18fSDaniel Vetter I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 87173edd18fSDaniel Vetter 87220e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay; 8739270388eSDaniel Vetter 8747648fa99SJesse Barnes I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 875b5b72e89SMatthew Garrett busy_up = I915_READ(RCPREVBSYTUPAVG); 876b5b72e89SMatthew Garrett busy_down = I915_READ(RCPREVBSYTDNAVG); 877f97108d1SJesse Barnes max_avg = I915_READ(RCBMAXAVG); 878f97108d1SJesse Barnes min_avg = I915_READ(RCBMINAVG); 879f97108d1SJesse Barnes 880f97108d1SJesse Barnes /* Handle RCS change request from hw */ 881b5b72e89SMatthew Garrett if (busy_up > max_avg) { 88220e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 88320e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay - 1; 88420e4d407SDaniel Vetter if (new_delay < dev_priv->ips.max_delay) 88520e4d407SDaniel Vetter new_delay = dev_priv->ips.max_delay; 886b5b72e89SMatthew Garrett } else if (busy_down < min_avg) { 88720e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 88820e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay + 1; 88920e4d407SDaniel Vetter if (new_delay > dev_priv->ips.min_delay) 89020e4d407SDaniel Vetter new_delay = dev_priv->ips.min_delay; 891f97108d1SJesse Barnes } 892f97108d1SJesse Barnes 8937648fa99SJesse Barnes if (ironlake_set_drps(dev, new_delay)) 89420e4d407SDaniel Vetter dev_priv->ips.cur_delay = new_delay; 895f97108d1SJesse Barnes 896d0ecd7e2SDaniel Vetter spin_unlock(&mchdev_lock); 8979270388eSDaniel Vetter 898f97108d1SJesse Barnes return; 899f97108d1SJesse Barnes } 900f97108d1SJesse Barnes 901549f7365SChris Wilson static void notify_ring(struct drm_device *dev, 902549f7365SChris Wilson struct intel_ring_buffer *ring) 903549f7365SChris Wilson { 904475553deSChris Wilson if (ring->obj == NULL) 905475553deSChris Wilson return; 906475553deSChris Wilson 907814e9b57SChris Wilson trace_i915_gem_request_complete(ring); 9089862e600SChris Wilson 909549f7365SChris Wilson wake_up_all(&ring->irq_queue); 91010cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 911549f7365SChris Wilson } 912549f7365SChris Wilson 9134912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work) 9143b8d8d91SJesse Barnes { 9154912d041SBen Widawsky drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 916c6a828d3SDaniel Vetter rps.work); 917edbfdb45SPaulo Zanoni u32 pm_iir; 918dd75fdc8SChris Wilson int 
new_delay, adj; 9193b8d8d91SJesse Barnes 92059cdb63dSDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 921c6a828d3SDaniel Vetter pm_iir = dev_priv->rps.pm_iir; 922c6a828d3SDaniel Vetter dev_priv->rps.pm_iir = 0; 9234848405cSBen Widawsky /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 924edbfdb45SPaulo Zanoni snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 92559cdb63dSDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 9264912d041SBen Widawsky 92760611c13SPaulo Zanoni /* Make sure we didn't queue anything we're not going to process. */ 92860611c13SPaulo Zanoni WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); 92960611c13SPaulo Zanoni 9304848405cSBen Widawsky if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 9313b8d8d91SJesse Barnes return; 9323b8d8d91SJesse Barnes 9334fc688ceSJesse Barnes mutex_lock(&dev_priv->rps.hw_lock); 9347b9e0ae6SChris Wilson 935dd75fdc8SChris Wilson adj = dev_priv->rps.last_adj; 9367425034aSVille Syrjälä if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 937dd75fdc8SChris Wilson if (adj > 0) 938dd75fdc8SChris Wilson adj *= 2; 939dd75fdc8SChris Wilson else 940dd75fdc8SChris Wilson adj = 1; 941dd75fdc8SChris Wilson new_delay = dev_priv->rps.cur_delay + adj; 9427425034aSVille Syrjälä 9437425034aSVille Syrjälä /* 9447425034aSVille Syrjälä * For better performance, jump directly 9457425034aSVille Syrjälä * to RPe if we're below it. 9467425034aSVille Syrjälä */ 947dd75fdc8SChris Wilson if (new_delay < dev_priv->rps.rpe_delay) 9487425034aSVille Syrjälä new_delay = dev_priv->rps.rpe_delay; 949dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 950dd75fdc8SChris Wilson if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) 951dd75fdc8SChris Wilson new_delay = dev_priv->rps.rpe_delay; 952dd75fdc8SChris Wilson else 953dd75fdc8SChris Wilson new_delay = dev_priv->rps.min_delay; 954dd75fdc8SChris Wilson adj = 0; 955dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 956dd75fdc8SChris Wilson if (adj < 0) 957dd75fdc8SChris Wilson adj *= 2; 958dd75fdc8SChris Wilson else 959dd75fdc8SChris Wilson adj = -1; 960dd75fdc8SChris Wilson new_delay = dev_priv->rps.cur_delay + adj; 961dd75fdc8SChris Wilson } else { /* unknown event */ 962dd75fdc8SChris Wilson new_delay = dev_priv->rps.cur_delay; 963dd75fdc8SChris Wilson } 9643b8d8d91SJesse Barnes 96579249636SBen Widawsky /* sysfs frequency interfaces may have snuck in while servicing the 96679249636SBen Widawsky * interrupt 96779249636SBen Widawsky */ 968dd75fdc8SChris Wilson if (new_delay < (int)dev_priv->rps.min_delay) 969dd75fdc8SChris Wilson new_delay = dev_priv->rps.min_delay; 970dd75fdc8SChris Wilson if (new_delay > (int)dev_priv->rps.max_delay) 971dd75fdc8SChris Wilson new_delay = dev_priv->rps.max_delay; 972dd75fdc8SChris Wilson dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; 973dd75fdc8SChris Wilson 9740a073b84SJesse Barnes if (IS_VALLEYVIEW(dev_priv->dev)) 9750a073b84SJesse Barnes valleyview_set_rps(dev_priv->dev, new_delay); 9760a073b84SJesse Barnes else 9774912d041SBen Widawsky gen6_set_rps(dev_priv->dev, new_delay); 9783b8d8d91SJesse Barnes 9794fc688ceSJesse Barnes mutex_unlock(&dev_priv->rps.hw_lock); 9803b8d8d91SJesse Barnes } 9813b8d8d91SJesse Barnes 982e3689190SBen Widawsky 983e3689190SBen Widawsky /** 984e3689190SBen Widawsky * ivybridge_parity_work - Workqueue called when a parity error interrupt 985e3689190SBen Widawsky * occurred. 986e3689190SBen Widawsky * @work: workqueue struct 987e3689190SBen Widawsky * 988e3689190SBen Widawsky * Doesn't actually do anything except notify userspace. 
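 * (The notification is a KOBJ_CHANGE uevent on the drm device whose
 * environment carries ROW=, BANK=, SUBBANK= and SLICE= keys -- see the
 * kasprintf() calls below.)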
As a consequence of 989e3689190SBen Widawsky * this event, userspace should try to remap the bad rows since statistically 990e3689190SBen Widawsky * it is likely the same row is more likely to go bad again. 991e3689190SBen Widawsky */ 992e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work) 993e3689190SBen Widawsky { 994e3689190SBen Widawsky drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 995a4da4fa4SDaniel Vetter l3_parity.error_work); 996e3689190SBen Widawsky u32 error_status, row, bank, subbank; 99735a85ac6SBen Widawsky char *parity_event[6]; 998e3689190SBen Widawsky uint32_t misccpctl; 999e3689190SBen Widawsky unsigned long flags; 100035a85ac6SBen Widawsky uint8_t slice = 0; 1001e3689190SBen Widawsky 1002e3689190SBen Widawsky /* We must turn off DOP level clock gating to access the L3 registers. 1003e3689190SBen Widawsky * In order to prevent a get/put style interface, acquire struct mutex 1004e3689190SBen Widawsky * any time we access those registers. 1005e3689190SBen Widawsky */ 1006e3689190SBen Widawsky mutex_lock(&dev_priv->dev->struct_mutex); 1007e3689190SBen Widawsky 100835a85ac6SBen Widawsky /* If we've screwed up tracking, just let the interrupt fire again */ 100935a85ac6SBen Widawsky if (WARN_ON(!dev_priv->l3_parity.which_slice)) 101035a85ac6SBen Widawsky goto out; 101135a85ac6SBen Widawsky 1012e3689190SBen Widawsky misccpctl = I915_READ(GEN7_MISCCPCTL); 1013e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1014e3689190SBen Widawsky POSTING_READ(GEN7_MISCCPCTL); 1015e3689190SBen Widawsky 101635a85ac6SBen Widawsky while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 101735a85ac6SBen Widawsky u32 reg; 101835a85ac6SBen Widawsky 101935a85ac6SBen Widawsky slice--; 102035a85ac6SBen Widawsky if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 102135a85ac6SBen Widawsky break; 102235a85ac6SBen Widawsky 102335a85ac6SBen Widawsky dev_priv->l3_parity.which_slice &= ~(1<<slice); 102435a85ac6SBen Widawsky 102535a85ac6SBen Widawsky reg = GEN7_L3CDERRST1 + (slice * 0x200); 102635a85ac6SBen Widawsky 102735a85ac6SBen Widawsky error_status = I915_READ(reg); 1028e3689190SBen Widawsky row = GEN7_PARITY_ERROR_ROW(error_status); 1029e3689190SBen Widawsky bank = GEN7_PARITY_ERROR_BANK(error_status); 1030e3689190SBen Widawsky subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1031e3689190SBen Widawsky 103235a85ac6SBen Widawsky I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 103335a85ac6SBen Widawsky POSTING_READ(reg); 1034e3689190SBen Widawsky 1035cce723edSBen Widawsky parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1036e3689190SBen Widawsky parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1037e3689190SBen Widawsky parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1038e3689190SBen Widawsky parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 103935a85ac6SBen Widawsky parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 104035a85ac6SBen Widawsky parity_event[5] = NULL; 1041e3689190SBen Widawsky 1042e3689190SBen Widawsky kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 1043e3689190SBen Widawsky KOBJ_CHANGE, parity_event); 1044e3689190SBen Widawsky 104535a85ac6SBen Widawsky DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 104635a85ac6SBen Widawsky slice, row, bank, subbank); 1047e3689190SBen Widawsky 104835a85ac6SBen Widawsky kfree(parity_event[4]); 1049e3689190SBen Widawsky kfree(parity_event[3]); 1050e3689190SBen 
Widawsky kfree(parity_event[2]); 1051e3689190SBen Widawsky kfree(parity_event[1]); 1052e3689190SBen Widawsky } 1053e3689190SBen Widawsky 105435a85ac6SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl); 105535a85ac6SBen Widawsky 105635a85ac6SBen Widawsky out: 105735a85ac6SBen Widawsky WARN_ON(dev_priv->l3_parity.which_slice); 105835a85ac6SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, flags); 105935a85ac6SBen Widawsky ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 106035a85ac6SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 106135a85ac6SBen Widawsky 106235a85ac6SBen Widawsky mutex_unlock(&dev_priv->dev->struct_mutex); 106335a85ac6SBen Widawsky } 106435a85ac6SBen Widawsky 106535a85ac6SBen Widawsky static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1066e3689190SBen Widawsky { 1067e3689190SBen Widawsky drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1068e3689190SBen Widawsky 1069040d2baaSBen Widawsky if (!HAS_L3_DPF(dev)) 1070e3689190SBen Widawsky return; 1071e3689190SBen Widawsky 1072d0ecd7e2SDaniel Vetter spin_lock(&dev_priv->irq_lock); 107335a85ac6SBen Widawsky ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1074d0ecd7e2SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 1075e3689190SBen Widawsky 107635a85ac6SBen Widawsky iir &= GT_PARITY_ERROR(dev); 107735a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 107835a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 1; 107935a85ac6SBen Widawsky 108035a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 108135a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 0; 108235a85ac6SBen Widawsky 1083a4da4fa4SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1084e3689190SBen Widawsky } 1085e3689190SBen Widawsky 1086f1af8fc1SPaulo Zanoni static void ilk_gt_irq_handler(struct drm_device *dev, 1087f1af8fc1SPaulo Zanoni struct drm_i915_private *dev_priv, 1088f1af8fc1SPaulo Zanoni u32 gt_iir) 1089f1af8fc1SPaulo Zanoni { 1090f1af8fc1SPaulo Zanoni if (gt_iir & 1091f1af8fc1SPaulo Zanoni (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1092f1af8fc1SPaulo Zanoni notify_ring(dev, &dev_priv->ring[RCS]); 1093f1af8fc1SPaulo Zanoni if (gt_iir & ILK_BSD_USER_INTERRUPT) 1094f1af8fc1SPaulo Zanoni notify_ring(dev, &dev_priv->ring[VCS]); 1095f1af8fc1SPaulo Zanoni } 1096f1af8fc1SPaulo Zanoni 1097e7b4c6b1SDaniel Vetter static void snb_gt_irq_handler(struct drm_device *dev, 1098e7b4c6b1SDaniel Vetter struct drm_i915_private *dev_priv, 1099e7b4c6b1SDaniel Vetter u32 gt_iir) 1100e7b4c6b1SDaniel Vetter { 1101e7b4c6b1SDaniel Vetter 1102cc609d5dSBen Widawsky if (gt_iir & 1103cc609d5dSBen Widawsky (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1104e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[RCS]); 1105cc609d5dSBen Widawsky if (gt_iir & GT_BSD_USER_INTERRUPT) 1106e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[VCS]); 1107cc609d5dSBen Widawsky if (gt_iir & GT_BLT_USER_INTERRUPT) 1108e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[BCS]); 1109e7b4c6b1SDaniel Vetter 1110cc609d5dSBen Widawsky if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1111cc609d5dSBen Widawsky GT_BSD_CS_ERROR_INTERRUPT | 1112cc609d5dSBen Widawsky GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1113e7b4c6b1SDaniel Vetter DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 1114e7b4c6b1SDaniel Vetter i915_handle_error(dev, false); 1115e7b4c6b1SDaniel Vetter } 1116e3689190SBen Widawsky 111735a85ac6SBen Widawsky if 
(gt_iir & GT_PARITY_ERROR(dev)) 111835a85ac6SBen Widawsky ivybridge_parity_error_irq_handler(dev, gt_iir); 1119e7b4c6b1SDaniel Vetter } 1120e7b4c6b1SDaniel Vetter 1121b543fb04SEgbert Eich #define HPD_STORM_DETECT_PERIOD 1000 1122b543fb04SEgbert Eich #define HPD_STORM_THRESHOLD 5 1123b543fb04SEgbert Eich 112410a504deSDaniel Vetter static inline void intel_hpd_irq_handler(struct drm_device *dev, 1125b543fb04SEgbert Eich u32 hotplug_trigger, 1126b543fb04SEgbert Eich const u32 *hpd) 1127b543fb04SEgbert Eich { 1128b543fb04SEgbert Eich drm_i915_private_t *dev_priv = dev->dev_private; 1129b543fb04SEgbert Eich int i; 113010a504deSDaniel Vetter bool storm_detected = false; 1131b543fb04SEgbert Eich 113291d131d2SDaniel Vetter if (!hotplug_trigger) 113391d131d2SDaniel Vetter return; 113491d131d2SDaniel Vetter 1135b5ea2d56SDaniel Vetter spin_lock(&dev_priv->irq_lock); 1136b543fb04SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 1137821450c6SEgbert Eich 1138b8f102e8SEgbert Eich WARN(((hpd[i] & hotplug_trigger) && 1139b8f102e8SEgbert Eich dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), 1140b8f102e8SEgbert Eich "Received HPD interrupt although disabled\n"); 1141b8f102e8SEgbert Eich 1142b543fb04SEgbert Eich if (!(hpd[i] & hotplug_trigger) || 1143b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1144b543fb04SEgbert Eich continue; 1145b543fb04SEgbert Eich 1146bc5ead8cSJani Nikula dev_priv->hpd_event_bits |= (1 << i); 1147b543fb04SEgbert Eich if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1148b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_last_jiffies 1149b543fb04SEgbert Eich + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1150b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1151b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 1152b8f102e8SEgbert Eich DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1153b543fb04SEgbert Eich } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1154b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1155142e2398SEgbert Eich dev_priv->hpd_event_bits &= ~(1 << i); 1156b543fb04SEgbert Eich DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 115710a504deSDaniel Vetter storm_detected = true; 1158b543fb04SEgbert Eich } else { 1159b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt++; 1160b8f102e8SEgbert Eich DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1161b8f102e8SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt); 1162b543fb04SEgbert Eich } 1163b543fb04SEgbert Eich } 1164b543fb04SEgbert Eich 116510a504deSDaniel Vetter if (storm_detected) 116610a504deSDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 1167b5ea2d56SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 11685876fa0dSDaniel Vetter 1169645416f5SDaniel Vetter /* 1170645416f5SDaniel Vetter * Our hotplug handler can grab modeset locks (by calling down into the 1171645416f5SDaniel Vetter * fb helpers). Hence it must not be run on our own dev-priv->wq work 1172645416f5SDaniel Vetter * queue for otherwise the flush_work in the pageflip code will 1173645416f5SDaniel Vetter * deadlock. 
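 * That is also why the code below uses schedule_work() on the system
 * workqueue rather than queue_work(dev_priv->wq, ...).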
1174645416f5SDaniel Vetter */ 1175645416f5SDaniel Vetter schedule_work(&dev_priv->hotplug_work); 1176b543fb04SEgbert Eich } 1177b543fb04SEgbert Eich 1178515ac2bbSDaniel Vetter static void gmbus_irq_handler(struct drm_device *dev) 1179515ac2bbSDaniel Vetter { 118028c70f16SDaniel Vetter struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 118128c70f16SDaniel Vetter 118228c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1183515ac2bbSDaniel Vetter } 1184515ac2bbSDaniel Vetter 1185ce99c256SDaniel Vetter static void dp_aux_irq_handler(struct drm_device *dev) 1186ce99c256SDaniel Vetter { 11879ee32feaSDaniel Vetter struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 11889ee32feaSDaniel Vetter 11899ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1190ce99c256SDaniel Vetter } 1191ce99c256SDaniel Vetter 11928bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS) 1193277de95eSDaniel Vetter static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1194eba94eb9SDaniel Vetter uint32_t crc0, uint32_t crc1, 1195eba94eb9SDaniel Vetter uint32_t crc2, uint32_t crc3, 11968bc5e955SDaniel Vetter uint32_t crc4) 11978bf1e9f1SShuang He { 11988bf1e9f1SShuang He struct drm_i915_private *dev_priv = dev->dev_private; 11998bf1e9f1SShuang He struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 12008bf1e9f1SShuang He struct intel_pipe_crc_entry *entry; 1201ac2300d4SDamien Lespiau int head, tail; 1202b2c88f5bSDamien Lespiau 1203d538bbdfSDamien Lespiau spin_lock(&pipe_crc->lock); 1204d538bbdfSDamien Lespiau 12050c912c79SDamien Lespiau if (!pipe_crc->entries) { 1206d538bbdfSDamien Lespiau spin_unlock(&pipe_crc->lock); 12070c912c79SDamien Lespiau DRM_ERROR("spurious interrupt\n"); 12080c912c79SDamien Lespiau return; 12090c912c79SDamien Lespiau } 12100c912c79SDamien Lespiau 1211d538bbdfSDamien Lespiau head = pipe_crc->head; 1212d538bbdfSDamien Lespiau tail = pipe_crc->tail; 1213b2c88f5bSDamien Lespiau 1214b2c88f5bSDamien Lespiau if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1215d538bbdfSDamien Lespiau spin_unlock(&pipe_crc->lock); 1216b2c88f5bSDamien Lespiau DRM_ERROR("CRC buffer overflowing\n"); 1217b2c88f5bSDamien Lespiau return; 1218b2c88f5bSDamien Lespiau } 1219b2c88f5bSDamien Lespiau 1220b2c88f5bSDamien Lespiau entry = &pipe_crc->entries[head]; 12218bf1e9f1SShuang He 12228bc5e955SDaniel Vetter entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1223eba94eb9SDaniel Vetter entry->crc[0] = crc0; 1224eba94eb9SDaniel Vetter entry->crc[1] = crc1; 1225eba94eb9SDaniel Vetter entry->crc[2] = crc2; 1226eba94eb9SDaniel Vetter entry->crc[3] = crc3; 1227eba94eb9SDaniel Vetter entry->crc[4] = crc4; 1228b2c88f5bSDamien Lespiau 1229b2c88f5bSDamien Lespiau head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1230d538bbdfSDamien Lespiau pipe_crc->head = head; 1231d538bbdfSDamien Lespiau 1232d538bbdfSDamien Lespiau spin_unlock(&pipe_crc->lock); 123307144428SDamien Lespiau 123407144428SDamien Lespiau wake_up_interruptible(&pipe_crc->wq); 12358bf1e9f1SShuang He } 1236277de95eSDaniel Vetter #else 1237277de95eSDaniel Vetter static inline void 1238277de95eSDaniel Vetter display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1239277de95eSDaniel Vetter uint32_t crc0, uint32_t crc1, 1240277de95eSDaniel Vetter uint32_t crc2, uint32_t crc3, 1241277de95eSDaniel Vetter uint32_t crc4) {} 1242277de95eSDaniel Vetter #endif 1243eba94eb9SDaniel Vetter 1244277de95eSDaniel Vetter 1245277de95eSDaniel Vetter static void 
hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 12465a69b89fSDaniel Vetter { 12475a69b89fSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 12485a69b89fSDaniel Vetter 1249277de95eSDaniel Vetter display_pipe_crc_irq_handler(dev, pipe, 12505a69b89fSDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 12515a69b89fSDaniel Vetter 0, 0, 0, 0); 12525a69b89fSDaniel Vetter } 12535a69b89fSDaniel Vetter 1254277de95eSDaniel Vetter static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1255eba94eb9SDaniel Vetter { 1256eba94eb9SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 1257eba94eb9SDaniel Vetter 1258277de95eSDaniel Vetter display_pipe_crc_irq_handler(dev, pipe, 1259eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1260eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1261eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1262eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 12638bc5e955SDaniel Vetter I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1264eba94eb9SDaniel Vetter } 12655b3a856bSDaniel Vetter 1266277de95eSDaniel Vetter static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 12675b3a856bSDaniel Vetter { 12685b3a856bSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 12690b5c5ed0SDaniel Vetter uint32_t res1, res2; 12700b5c5ed0SDaniel Vetter 12710b5c5ed0SDaniel Vetter if (INTEL_INFO(dev)->gen >= 3) 12720b5c5ed0SDaniel Vetter res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 12730b5c5ed0SDaniel Vetter else 12740b5c5ed0SDaniel Vetter res1 = 0; 12750b5c5ed0SDaniel Vetter 12760b5c5ed0SDaniel Vetter if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 12770b5c5ed0SDaniel Vetter res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 12780b5c5ed0SDaniel Vetter else 12790b5c5ed0SDaniel Vetter res2 = 0; 12805b3a856bSDaniel Vetter 1281277de95eSDaniel Vetter display_pipe_crc_irq_handler(dev, pipe, 12820b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_RED(pipe)), 12830b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_GREEN(pipe)), 12840b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_BLUE(pipe)), 12850b5c5ed0SDaniel Vetter res1, res2); 12865b3a856bSDaniel Vetter } 12878bf1e9f1SShuang He 12881403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 12891403c0d4SPaulo Zanoni * IMR bits until the work is done. Other interrupts can be processed without 12901403c0d4SPaulo Zanoni * the work queue. 
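 * Concretely, the handler below takes irq_lock, accumulates the RPS bits
 * into dev_priv->rps.pm_iir, masks them with snb_disable_pm_irq() and then
 * queues dev_priv->rps.work, which is expected to unmask them again once it
 * has taken forcewake and processed the events.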
*/ 12911403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1292baf02a1fSBen Widawsky { 129341a05a3aSDaniel Vetter if (pm_iir & GEN6_PM_RPS_EVENTS) { 129459cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 12954848405cSBen Widawsky dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 12964d3b3d5fSPaulo Zanoni snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 129759cdb63dSDaniel Vetter spin_unlock(&dev_priv->irq_lock); 12982adbee62SDaniel Vetter 12992adbee62SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->rps.work); 130041a05a3aSDaniel Vetter } 1301baf02a1fSBen Widawsky 13021403c0d4SPaulo Zanoni if (HAS_VEBOX(dev_priv->dev)) { 130312638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 130412638c57SBen Widawsky notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 130512638c57SBen Widawsky 130612638c57SBen Widawsky if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 130712638c57SBen Widawsky DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 130812638c57SBen Widawsky i915_handle_error(dev_priv->dev, false); 130912638c57SBen Widawsky } 131012638c57SBen Widawsky } 13111403c0d4SPaulo Zanoni } 1312baf02a1fSBen Widawsky 1313ff1f525eSDaniel Vetter static irqreturn_t valleyview_irq_handler(int irq, void *arg) 13147e231dbeSJesse Barnes { 13157e231dbeSJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 13167e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 13177e231dbeSJesse Barnes u32 iir, gt_iir, pm_iir; 13187e231dbeSJesse Barnes irqreturn_t ret = IRQ_NONE; 13197e231dbeSJesse Barnes unsigned long irqflags; 13207e231dbeSJesse Barnes int pipe; 13217e231dbeSJesse Barnes u32 pipe_stats[I915_MAX_PIPES]; 13227e231dbeSJesse Barnes 13237e231dbeSJesse Barnes atomic_inc(&dev_priv->irq_received); 13247e231dbeSJesse Barnes 13257e231dbeSJesse Barnes while (true) { 13267e231dbeSJesse Barnes iir = I915_READ(VLV_IIR); 13277e231dbeSJesse Barnes gt_iir = I915_READ(GTIIR); 13287e231dbeSJesse Barnes pm_iir = I915_READ(GEN6_PMIIR); 13297e231dbeSJesse Barnes 13307e231dbeSJesse Barnes if (gt_iir == 0 && pm_iir == 0 && iir == 0) 13317e231dbeSJesse Barnes goto out; 13327e231dbeSJesse Barnes 13337e231dbeSJesse Barnes ret = IRQ_HANDLED; 13347e231dbeSJesse Barnes 1335e7b4c6b1SDaniel Vetter snb_gt_irq_handler(dev, dev_priv, gt_iir); 13367e231dbeSJesse Barnes 13377e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 13387e231dbeSJesse Barnes for_each_pipe(pipe) { 13397e231dbeSJesse Barnes int reg = PIPESTAT(pipe); 13407e231dbeSJesse Barnes pipe_stats[pipe] = I915_READ(reg); 13417e231dbeSJesse Barnes 13427e231dbeSJesse Barnes /* 13437e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 13447e231dbeSJesse Barnes */ 13457e231dbeSJesse Barnes if (pipe_stats[pipe] & 0x8000ffff) { 13467e231dbeSJesse Barnes if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 13477e231dbeSJesse Barnes DRM_DEBUG_DRIVER("pipe %c underrun\n", 13487e231dbeSJesse Barnes pipe_name(pipe)); 13497e231dbeSJesse Barnes I915_WRITE(reg, pipe_stats[pipe]); 13507e231dbeSJesse Barnes } 13517e231dbeSJesse Barnes } 13527e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 13537e231dbeSJesse Barnes 135431acc7f5SJesse Barnes for_each_pipe(pipe) { 135531acc7f5SJesse Barnes if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 135631acc7f5SJesse Barnes drm_handle_vblank(dev, pipe); 135731acc7f5SJesse Barnes 135831acc7f5SJesse Barnes if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 135931acc7f5SJesse 
Barnes intel_prepare_page_flip(dev, pipe); 136031acc7f5SJesse Barnes intel_finish_page_flip(dev, pipe); 136131acc7f5SJesse Barnes } 13624356d586SDaniel Vetter 13634356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1364277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 136531acc7f5SJesse Barnes } 136631acc7f5SJesse Barnes 13677e231dbeSJesse Barnes /* Consume port. Then clear IIR or we'll miss events */ 13687e231dbeSJesse Barnes if (iir & I915_DISPLAY_PORT_INTERRUPT) { 13697e231dbeSJesse Barnes u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1370b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 13717e231dbeSJesse Barnes 13727e231dbeSJesse Barnes DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 13737e231dbeSJesse Barnes hotplug_status); 137491d131d2SDaniel Vetter 137510a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 137691d131d2SDaniel Vetter 13777e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 13787e231dbeSJesse Barnes I915_READ(PORT_HOTPLUG_STAT); 13797e231dbeSJesse Barnes } 13807e231dbeSJesse Barnes 1381515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1382515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 13837e231dbeSJesse Barnes 138460611c13SPaulo Zanoni if (pm_iir) 1385d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 13867e231dbeSJesse Barnes 13877e231dbeSJesse Barnes I915_WRITE(GTIIR, gt_iir); 13887e231dbeSJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 13897e231dbeSJesse Barnes I915_WRITE(VLV_IIR, iir); 13907e231dbeSJesse Barnes } 13917e231dbeSJesse Barnes 13927e231dbeSJesse Barnes out: 13937e231dbeSJesse Barnes return ret; 13947e231dbeSJesse Barnes } 13957e231dbeSJesse Barnes 139623e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1397776ad806SJesse Barnes { 1398776ad806SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 13999db4a9c7SJesse Barnes int pipe; 1400b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1401776ad806SJesse Barnes 140210a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 140391d131d2SDaniel Vetter 1404cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 1405cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1406776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 1407cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1408cfc33bf7SVille Syrjälä port_name(port)); 1409cfc33bf7SVille Syrjälä } 1410776ad806SJesse Barnes 1411ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 1412ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1413ce99c256SDaniel Vetter 1414776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 1415515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 1416776ad806SJesse Barnes 1417776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 1418776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1419776ad806SJesse Barnes 1420776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 1421776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1422776ad806SJesse Barnes 1423776ad806SJesse Barnes if (pch_iir & SDE_POISON) 1424776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 1425776ad806SJesse Barnes 14269db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 14279db4a9c7SJesse Barnes for_each_pipe(pipe) 14289db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 14299db4a9c7SJesse Barnes 
pipe_name(pipe), 14309db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1431776ad806SJesse Barnes 1432776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1433776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1434776ad806SJesse Barnes 1435776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1436776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1437776ad806SJesse Barnes 1438776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 14398664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 14408664281bSPaulo Zanoni false)) 14418664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 14428664281bSPaulo Zanoni 14438664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 14448664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 14458664281bSPaulo Zanoni false)) 14468664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 14478664281bSPaulo Zanoni } 14488664281bSPaulo Zanoni 14498664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 14508664281bSPaulo Zanoni { 14518664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 14528664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 14535a69b89fSDaniel Vetter enum pipe pipe; 14548664281bSPaulo Zanoni 1455de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1456de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1457de032bf4SPaulo Zanoni 14585a69b89fSDaniel Vetter for_each_pipe(pipe) { 14595a69b89fSDaniel Vetter if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 14605a69b89fSDaniel Vetter if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 14615a69b89fSDaniel Vetter false)) 14625a69b89fSDaniel Vetter DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 14635a69b89fSDaniel Vetter pipe_name(pipe)); 14645a69b89fSDaniel Vetter } 14658664281bSPaulo Zanoni 14665a69b89fSDaniel Vetter if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 14675a69b89fSDaniel Vetter if (IS_IVYBRIDGE(dev)) 1468277de95eSDaniel Vetter ivb_pipe_crc_irq_handler(dev, pipe); 14695a69b89fSDaniel Vetter else 1470277de95eSDaniel Vetter hsw_pipe_crc_irq_handler(dev, pipe); 14715a69b89fSDaniel Vetter } 14725a69b89fSDaniel Vetter } 14738bf1e9f1SShuang He 14748664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 14758664281bSPaulo Zanoni } 14768664281bSPaulo Zanoni 14778664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 14788664281bSPaulo Zanoni { 14798664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 14808664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 14818664281bSPaulo Zanoni 1482de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1483de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1484de032bf4SPaulo Zanoni 14858664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 14868664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 14878664281bSPaulo Zanoni false)) 14888664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 14898664281bSPaulo Zanoni 14908664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 14918664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 14928664281bSPaulo Zanoni false)) 14938664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 14948664281bSPaulo Zanoni 14958664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 
14968664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 14978664281bSPaulo Zanoni false)) 14988664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 14998664281bSPaulo Zanoni 15008664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1501776ad806SJesse Barnes } 1502776ad806SJesse Barnes 150323e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 150423e81d69SAdam Jackson { 150523e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 150623e81d69SAdam Jackson int pipe; 1507b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 150823e81d69SAdam Jackson 150910a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 151091d131d2SDaniel Vetter 1511cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1512cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 151323e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1514cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1515cfc33bf7SVille Syrjälä port_name(port)); 1516cfc33bf7SVille Syrjälä } 151723e81d69SAdam Jackson 151823e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1519ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 152023e81d69SAdam Jackson 152123e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1522515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 152323e81d69SAdam Jackson 152423e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 152523e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 152623e81d69SAdam Jackson 152723e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 152823e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 152923e81d69SAdam Jackson 153023e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 153123e81d69SAdam Jackson for_each_pipe(pipe) 153223e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 153323e81d69SAdam Jackson pipe_name(pipe), 153423e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 15358664281bSPaulo Zanoni 15368664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 15378664281bSPaulo Zanoni cpt_serr_int_handler(dev); 153823e81d69SAdam Jackson } 153923e81d69SAdam Jackson 1540c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1541c008bc6eSPaulo Zanoni { 1542c008bc6eSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 1543c008bc6eSPaulo Zanoni 1544c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 1545c008bc6eSPaulo Zanoni dp_aux_irq_handler(dev); 1546c008bc6eSPaulo Zanoni 1547c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 1548c008bc6eSPaulo Zanoni intel_opregion_asle_intr(dev); 1549c008bc6eSPaulo Zanoni 1550c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_VBLANK) 1551c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 0); 1552c008bc6eSPaulo Zanoni 1553c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_VBLANK) 1554c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 1); 1555c008bc6eSPaulo Zanoni 1556c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 1557c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1558c008bc6eSPaulo Zanoni 1559c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1560c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1561c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1562c008bc6eSPaulo Zanoni 1563c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1564c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 
1565c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1566c008bc6eSPaulo Zanoni 15675b3a856bSDaniel Vetter if (de_iir & DE_PIPEA_CRC_DONE) 1568277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, PIPE_A); 15695b3a856bSDaniel Vetter 15705b3a856bSDaniel Vetter if (de_iir & DE_PIPEB_CRC_DONE) 1571277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, PIPE_B); 15725b3a856bSDaniel Vetter 1573c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEA_FLIP_DONE) { 1574c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 0); 1575c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 0); 1576c008bc6eSPaulo Zanoni } 1577c008bc6eSPaulo Zanoni 1578c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEB_FLIP_DONE) { 1579c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 1); 1580c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 1); 1581c008bc6eSPaulo Zanoni } 1582c008bc6eSPaulo Zanoni 1583c008bc6eSPaulo Zanoni /* check event from PCH */ 1584c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 1585c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 1586c008bc6eSPaulo Zanoni 1587c008bc6eSPaulo Zanoni if (HAS_PCH_CPT(dev)) 1588c008bc6eSPaulo Zanoni cpt_irq_handler(dev, pch_iir); 1589c008bc6eSPaulo Zanoni else 1590c008bc6eSPaulo Zanoni ibx_irq_handler(dev, pch_iir); 1591c008bc6eSPaulo Zanoni 1592c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 1593c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 1594c008bc6eSPaulo Zanoni } 1595c008bc6eSPaulo Zanoni 1596c008bc6eSPaulo Zanoni if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1597c008bc6eSPaulo Zanoni ironlake_rps_change_irq_handler(dev); 1598c008bc6eSPaulo Zanoni } 1599c008bc6eSPaulo Zanoni 16009719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 16019719fb98SPaulo Zanoni { 16029719fb98SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 16039719fb98SPaulo Zanoni int i; 16049719fb98SPaulo Zanoni 16059719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 16069719fb98SPaulo Zanoni ivb_err_int_handler(dev); 16079719fb98SPaulo Zanoni 16089719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 16099719fb98SPaulo Zanoni dp_aux_irq_handler(dev); 16109719fb98SPaulo Zanoni 16119719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 16129719fb98SPaulo Zanoni intel_opregion_asle_intr(dev); 16139719fb98SPaulo Zanoni 16149719fb98SPaulo Zanoni for (i = 0; i < 3; i++) { 16159719fb98SPaulo Zanoni if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 16169719fb98SPaulo Zanoni drm_handle_vblank(dev, i); 16179719fb98SPaulo Zanoni if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 16189719fb98SPaulo Zanoni intel_prepare_page_flip(dev, i); 16199719fb98SPaulo Zanoni intel_finish_page_flip_plane(dev, i); 16209719fb98SPaulo Zanoni } 16219719fb98SPaulo Zanoni } 16229719fb98SPaulo Zanoni 16239719fb98SPaulo Zanoni /* check event from PCH */ 16249719fb98SPaulo Zanoni if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 16259719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 16269719fb98SPaulo Zanoni 16279719fb98SPaulo Zanoni cpt_irq_handler(dev, pch_iir); 16289719fb98SPaulo Zanoni 16299719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 16309719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 16319719fb98SPaulo Zanoni } 16329719fb98SPaulo Zanoni } 16339719fb98SPaulo Zanoni 1634f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1635b1f14ad0SJesse Barnes { 1636b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1637b1f14ad0SJesse Barnes 
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1638f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 16390e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 1640b1f14ad0SJesse Barnes 1641b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1642b1f14ad0SJesse Barnes 16438664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 16448664281bSPaulo Zanoni * do any I915_{READ,WRITE}. */ 1645907b28c5SChris Wilson intel_uncore_check_errors(dev); 16468664281bSPaulo Zanoni 1647b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1648b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1649b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 165023a78516SPaulo Zanoni POSTING_READ(DEIER); 16510e43406bSChris Wilson 165244498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 165344498aeaSPaulo Zanoni * interrupts will will be stored on its back queue, and then we'll be 165444498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 165544498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 165644498aeaSPaulo Zanoni * due to its back queue). */ 1657ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 165844498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 165944498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 166044498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1661ab5c608bSBen Widawsky } 166244498aeaSPaulo Zanoni 16630e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 16640e43406bSChris Wilson if (gt_iir) { 1665d8fc8a47SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) 16660e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 1667d8fc8a47SPaulo Zanoni else 1668d8fc8a47SPaulo Zanoni ilk_gt_irq_handler(dev, dev_priv, gt_iir); 16690e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 16700e43406bSChris Wilson ret = IRQ_HANDLED; 16710e43406bSChris Wilson } 1672b1f14ad0SJesse Barnes 1673b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 16740e43406bSChris Wilson if (de_iir) { 1675f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) 16769719fb98SPaulo Zanoni ivb_display_irq_handler(dev, de_iir); 1677f1af8fc1SPaulo Zanoni else 1678f1af8fc1SPaulo Zanoni ilk_display_irq_handler(dev, de_iir); 16790e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 16800e43406bSChris Wilson ret = IRQ_HANDLED; 16810e43406bSChris Wilson } 16820e43406bSChris Wilson 1683f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) { 1684f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 16850e43406bSChris Wilson if (pm_iir) { 1686d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 1687b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 16880e43406bSChris Wilson ret = IRQ_HANDLED; 16890e43406bSChris Wilson } 1690f1af8fc1SPaulo Zanoni } 1691b1f14ad0SJesse Barnes 1692b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1693b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1694ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 169544498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 169644498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1697ab5c608bSBen Widawsky } 1698b1f14ad0SJesse Barnes 1699b1f14ad0SJesse Barnes return ret; 1700b1f14ad0SJesse Barnes } 1701b1f14ad0SJesse Barnes 170217e1df07SDaniel Vetter static void i915_error_wake_up(struct drm_i915_private *dev_priv, 170317e1df07SDaniel Vetter bool reset_completed) 170417e1df07SDaniel Vetter { 170517e1df07SDaniel Vetter struct intel_ring_buffer *ring; 170617e1df07SDaniel Vetter int i; 
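	/*
	 * Note on callers (as used later in this file): i915_handle_error()
	 * calls this with reset_completed == false while a reset is still
	 * pending; i915_error_work_func() calls it with true once the reset
	 * has finished.
	 */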
170717e1df07SDaniel Vetter 170817e1df07SDaniel Vetter /* 170917e1df07SDaniel Vetter * Notify all waiters for GPU completion events that reset state has 171017e1df07SDaniel Vetter * been changed, and that they need to restart their wait after 171117e1df07SDaniel Vetter * checking for potential errors (and bail out to drop locks if there is 171217e1df07SDaniel Vetter * a gpu reset pending so that i915_error_work_func can acquire them). 171317e1df07SDaniel Vetter */ 171417e1df07SDaniel Vetter 171517e1df07SDaniel Vetter /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 171617e1df07SDaniel Vetter for_each_ring(ring, dev_priv, i) 171717e1df07SDaniel Vetter wake_up_all(&ring->irq_queue); 171817e1df07SDaniel Vetter 171917e1df07SDaniel Vetter /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 172017e1df07SDaniel Vetter wake_up_all(&dev_priv->pending_flip_queue); 172117e1df07SDaniel Vetter 172217e1df07SDaniel Vetter /* 172317e1df07SDaniel Vetter * Signal tasks blocked in i915_gem_wait_for_error that the pending 172417e1df07SDaniel Vetter * reset state is cleared. 172517e1df07SDaniel Vetter */ 172617e1df07SDaniel Vetter if (reset_completed) 172717e1df07SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 172817e1df07SDaniel Vetter } 172917e1df07SDaniel Vetter 17308a905236SJesse Barnes /** 17318a905236SJesse Barnes * i915_error_work_func - do process context error handling work 17328a905236SJesse Barnes * @work: work struct 17338a905236SJesse Barnes * 17348a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 17358a905236SJesse Barnes * was detected. 17368a905236SJesse Barnes */ 17378a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 17388a905236SJesse Barnes { 17391f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 17401f83fee0SDaniel Vetter work); 17411f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 17421f83fee0SDaniel Vetter gpu_error); 17438a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1744cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1745cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1746cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 174717e1df07SDaniel Vetter int ret; 17488a905236SJesse Barnes 1749f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 17508a905236SJesse Barnes 17517db0ba24SDaniel Vetter /* 17527db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 17537db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 17547db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 17557db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 17567db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 17577db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 17587db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 17597db0ba24SDaniel Vetter * work we don't need to worry about any other races. 
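	 * The condition below (reset in progress, but not terminally wedged)
	 * is exactly that quick check; if the GPU has already been marked
	 * wedged we do not attempt another reset.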
17607db0ba24SDaniel Vetter */ 17617db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 176244d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 17637db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 17647db0ba24SDaniel Vetter reset_event); 17651f83fee0SDaniel Vetter 176617e1df07SDaniel Vetter /* 176717e1df07SDaniel Vetter * All state reset _must_ be completed before we update the 176817e1df07SDaniel Vetter * reset counter, for otherwise waiters might miss the reset 176917e1df07SDaniel Vetter * pending state and not properly drop locks, resulting in 177017e1df07SDaniel Vetter * deadlocks with the reset work. 177117e1df07SDaniel Vetter */ 1772f69061beSDaniel Vetter ret = i915_reset(dev); 1773f69061beSDaniel Vetter 177417e1df07SDaniel Vetter intel_display_handle_reset(dev); 177517e1df07SDaniel Vetter 1776f69061beSDaniel Vetter if (ret == 0) { 1777f69061beSDaniel Vetter /* 1778f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1779f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1780f69061beSDaniel Vetter * complete. 1781f69061beSDaniel Vetter * 1782f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1783f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1784f69061beSDaniel Vetter * updates before 1785f69061beSDaniel Vetter * the counter increment. 1786f69061beSDaniel Vetter */ 1787f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1788f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1789f69061beSDaniel Vetter 1790f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1791f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 17921f83fee0SDaniel Vetter } else { 17931f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1794f316a42cSBen Gamari } 17951f83fee0SDaniel Vetter 179617e1df07SDaniel Vetter /* 179717e1df07SDaniel Vetter * Note: The wake_up also serves as a memory barrier so that 179817e1df07SDaniel Vetter * waiters see the updated value of the reset counter atomic_t.
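	 * Passing reset_completed == true here also releases tasks blocked
	 * on gpu_error.reset_queue (i915_gem_wait_for_error).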
179917e1df07SDaniel Vetter */ 180017e1df07SDaniel Vetter i915_error_wake_up(dev_priv, true); 1801f316a42cSBen Gamari } 18028a905236SJesse Barnes } 18038a905236SJesse Barnes 180435aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1805c0e09200SDave Airlie { 18068a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 1807bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 180863eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 1809050ee91fSBen Widawsky int pipe, i; 181063eeaf38SJesse Barnes 181135aed2e6SChris Wilson if (!eir) 181235aed2e6SChris Wilson return; 181363eeaf38SJesse Barnes 1814a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 18158a905236SJesse Barnes 1816bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 1817bd9854f9SBen Widawsky 18188a905236SJesse Barnes if (IS_G4X(dev)) { 18198a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 18208a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 18218a905236SJesse Barnes 1822a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1823a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1824050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1825050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1826a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1827a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 18288a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 18293143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 18308a905236SJesse Barnes } 18318a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 18328a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1833a70491ccSJoe Perches pr_err("page table error\n"); 1834a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 18358a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 18363143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 18378a905236SJesse Barnes } 18388a905236SJesse Barnes } 18398a905236SJesse Barnes 1840a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 184163eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 184263eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1843a70491ccSJoe Perches pr_err("page table error\n"); 1844a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 184563eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 18463143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 184763eeaf38SJesse Barnes } 18488a905236SJesse Barnes } 18498a905236SJesse Barnes 185063eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 1851a70491ccSJoe Perches pr_err("memory refresh error:\n"); 18529db4a9c7SJesse Barnes for_each_pipe(pipe) 1853a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 18549db4a9c7SJesse Barnes pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 185563eeaf38SJesse Barnes /* pipestat has already been acked */ 185663eeaf38SJesse Barnes } 185763eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 1858a70491ccSJoe Perches pr_err("instruction error\n"); 1859a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1860050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1861050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1862a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 186363eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 186463eeaf38SJesse Barnes 1865a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1866a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", 
I915_READ(IPEHR)); 1867a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 186863eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 18693143a2bfSChris Wilson POSTING_READ(IPEIR); 187063eeaf38SJesse Barnes } else { 187163eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 187263eeaf38SJesse Barnes 1873a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1874a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1875a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1876a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 187763eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 18783143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 187963eeaf38SJesse Barnes } 188063eeaf38SJesse Barnes } 188163eeaf38SJesse Barnes 188263eeaf38SJesse Barnes I915_WRITE(EIR, eir); 18833143a2bfSChris Wilson POSTING_READ(EIR); 188463eeaf38SJesse Barnes eir = I915_READ(EIR); 188563eeaf38SJesse Barnes if (eir) { 188663eeaf38SJesse Barnes /* 188763eeaf38SJesse Barnes * some errors might have become stuck, 188863eeaf38SJesse Barnes * mask them. 188963eeaf38SJesse Barnes */ 189063eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 189163eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 189263eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 189363eeaf38SJesse Barnes } 189435aed2e6SChris Wilson } 189535aed2e6SChris Wilson 189635aed2e6SChris Wilson /** 189735aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 189835aed2e6SChris Wilson * @dev: drm device 189935aed2e6SChris Wilson * 190035aed2e6SChris Wilson * Do some basic checking of regsiter state at error interrupt time and 190135aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 190235aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 190335aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 190435aed2e6SChris Wilson * of a ring dump etc.). 190535aed2e6SChris Wilson */ 1906527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 190735aed2e6SChris Wilson { 190835aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 190935aed2e6SChris Wilson 191035aed2e6SChris Wilson i915_capture_error_state(dev); 191135aed2e6SChris Wilson i915_report_and_clear_eir(dev); 19128a905236SJesse Barnes 1913ba1234d1SBen Gamari if (wedged) { 1914f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1915f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 1916ba1234d1SBen Gamari 191711ed50ecSBen Gamari /* 191817e1df07SDaniel Vetter * Wakeup waiting processes so that the reset work function 191917e1df07SDaniel Vetter * i915_error_work_func doesn't deadlock trying to grab various 192017e1df07SDaniel Vetter * locks. By bumping the reset counter first, the woken 192117e1df07SDaniel Vetter * processes will see a reset in progress and back off, 192217e1df07SDaniel Vetter * releasing their locks and then wait for the reset completion. 192317e1df07SDaniel Vetter * We must do this for _all_ gpu waiters that might hold locks 192417e1df07SDaniel Vetter * that the reset work needs to acquire. 192517e1df07SDaniel Vetter * 192617e1df07SDaniel Vetter * Note: The wake_up serves as the required memory barrier to 192717e1df07SDaniel Vetter * ensure that the waiters see the updated value of the reset 192817e1df07SDaniel Vetter * counter atomic_t. 
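	 * Here reset_completed is false, so only the ring irq_queues and the
	 * pending-flip queue are woken; waiters on gpu_error.reset_queue stay
	 * blocked until the reset work has actually completed.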
192911ed50ecSBen Gamari */ 193017e1df07SDaniel Vetter i915_error_wake_up(dev_priv, false); 193111ed50ecSBen Gamari } 193211ed50ecSBen Gamari 1933122f46baSDaniel Vetter /* 1934122f46baSDaniel Vetter * Our reset work can grab modeset locks (since it needs to reset the 1935122f46baSDaniel Vetter * state of outstanding pageflips). Hence it must not be run on our own 1936122f46baSDaniel Vetter * dev-priv->wq work queue for otherwise the flush_work in the pageflip 1937122f46baSDaniel Vetter * code will deadlock. 1938122f46baSDaniel Vetter */ 1939122f46baSDaniel Vetter schedule_work(&dev_priv->gpu_error.work); 19408a905236SJesse Barnes } 19418a905236SJesse Barnes 194221ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 19434e5359cdSSimon Farnsworth { 19444e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 19454e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 19464e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 194705394f39SChris Wilson struct drm_i915_gem_object *obj; 19484e5359cdSSimon Farnsworth struct intel_unpin_work *work; 19494e5359cdSSimon Farnsworth unsigned long flags; 19504e5359cdSSimon Farnsworth bool stall_detected; 19514e5359cdSSimon Farnsworth 19524e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 19534e5359cdSSimon Farnsworth if (intel_crtc == NULL) 19544e5359cdSSimon Farnsworth return; 19554e5359cdSSimon Farnsworth 19564e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 19574e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 19584e5359cdSSimon Farnsworth 1959e7d841caSChris Wilson if (work == NULL || 1960e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1961e7d841caSChris Wilson !work->enable_stall_check) { 19624e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 19634e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 19644e5359cdSSimon Farnsworth return; 19654e5359cdSSimon Farnsworth } 19664e5359cdSSimon Farnsworth 19674e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 196805394f39SChris Wilson obj = work->pending_flip_obj; 1969a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 19709db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 1971446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1972f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 19734e5359cdSSimon Farnsworth } else { 19749db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 1975f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 197601f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 19774e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 19784e5359cdSSimon Farnsworth } 19794e5359cdSSimon Farnsworth 19804e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 19814e5359cdSSimon Farnsworth 19824e5359cdSSimon Farnsworth if (stall_detected) { 19834e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 19844e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 19854e5359cdSSimon Farnsworth } 19864e5359cdSSimon Farnsworth } 19874e5359cdSSimon Farnsworth 198842f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 198942f52ef8SKeith Packard * we use as a pipe index 199042f52ef8SKeith Packard */ 1991f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 19920a3e67a4SJesse Barnes { 19930a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1994e9d21d7fSKeith Packard unsigned long irqflags; 199571e0ffa5SJesse Barnes 19965eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 199771e0ffa5SJesse Barnes return -EINVAL; 19980a3e67a4SJesse Barnes 19991ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2000f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 20017c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 20027c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 20030a3e67a4SJesse Barnes else 20047c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 20057c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 20068692d00eSChris Wilson 20078692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 20088692d00eSChris Wilson if (dev_priv->info->gen == 3) 20096b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 20101ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 20118692d00eSChris Wilson 20120a3e67a4SJesse Barnes return 0; 20130a3e67a4SJesse Barnes } 20140a3e67a4SJesse Barnes 2015f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2016f796cf8fSJesse Barnes { 2017f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2018f796cf8fSJesse Barnes unsigned long irqflags; 2019b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2020b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 2021f796cf8fSJesse Barnes 2022f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 2023f796cf8fSJesse Barnes return -EINVAL; 2024f796cf8fSJesse Barnes 2025f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2026b518421fSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 2027b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2028b1f14ad0SJesse Barnes 2029b1f14ad0SJesse Barnes return 0; 2030b1f14ad0SJesse Barnes } 2031b1f14ad0SJesse Barnes 20327e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 20337e231dbeSJesse Barnes { 20347e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 20357e231dbeSJesse Barnes unsigned long irqflags; 203631acc7f5SJesse Barnes u32 imr; 20377e231dbeSJesse Barnes 20387e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 20397e231dbeSJesse Barnes return -EINVAL; 20407e231dbeSJesse Barnes 20417e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 20427e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 204331acc7f5SJesse Barnes if (pipe == 0) 20447e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 204531acc7f5SJesse Barnes else 20467e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 20477e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 204831acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 204931acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 20507e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 20517e231dbeSJesse Barnes 20527e231dbeSJesse Barnes return 0; 20537e231dbeSJesse Barnes } 20547e231dbeSJesse Barnes 205542f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 205642f52ef8SKeith Packard * we use as a pipe index 205742f52ef8SKeith Packard */ 2058f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 20590a3e67a4SJesse Barnes { 20600a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2061e9d21d7fSKeith Packard unsigned long irqflags; 20620a3e67a4SJesse Barnes 20631ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 20648692d00eSChris Wilson if (dev_priv->info->gen == 3) 20656b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 20668692d00eSChris Wilson 20677c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 20687c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 20697c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 20701ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 20710a3e67a4SJesse Barnes } 20720a3e67a4SJesse Barnes 2073f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2074f796cf8fSJesse Barnes { 2075f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2076f796cf8fSJesse Barnes unsigned long irqflags; 2077b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2078b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 2079f796cf8fSJesse Barnes 2080f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2081b518421fSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 2082b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2083b1f14ad0SJesse Barnes } 2084b1f14ad0SJesse Barnes 20857e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 20867e231dbeSJesse Barnes { 20877e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 20887e231dbeSJesse Barnes unsigned long irqflags; 208931acc7f5SJesse Barnes u32 imr; 20907e231dbeSJesse Barnes 20917e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 209231acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 209331acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 20947e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 209531acc7f5SJesse Barnes if (pipe == 0) 20967e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 209731acc7f5SJesse Barnes else 20987e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 20997e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 21007e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 21017e231dbeSJesse Barnes } 21027e231dbeSJesse Barnes 2103893eead0SChris Wilson static u32 2104893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 2105852835f3SZou Nan hai { 2106893eead0SChris Wilson return list_entry(ring->request_list.prev, 2107893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 2108893eead0SChris Wilson } 2109893eead0SChris Wilson 21109107e9d2SChris Wilson static bool 21119107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2112893eead0SChris Wilson { 21139107e9d2SChris Wilson return (list_empty(&ring->request_list) || 21149107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 2115f65d9421SBen Gamari } 2116f65d9421SBen Gamari 21176274f212SChris Wilson static struct intel_ring_buffer * 21186274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2119a24a11e6SChris Wilson { 2120a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 21216274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 2122a24a11e6SChris Wilson 2123a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2124a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 2125a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 21266274f212SChris Wilson return NULL; 2127a24a11e6SChris Wilson 2128a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 2129a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 
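 * The scan below is bounded to the previous three dwords (acthd_min), and
 * the dword following the matched command is read back (plus one) as the
 * seqno being waited on, hence the acthd + 4 read further down.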
2130a24a11e6SChris Wilson */ 21316274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2132a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 2133a24a11e6SChris Wilson do { 2134a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 2135a24a11e6SChris Wilson if (cmd == ipehr) 2136a24a11e6SChris Wilson break; 2137a24a11e6SChris Wilson 2138a24a11e6SChris Wilson acthd -= 4; 2139a24a11e6SChris Wilson if (acthd < acthd_min) 21406274f212SChris Wilson return NULL; 2141a24a11e6SChris Wilson } while (1); 2142a24a11e6SChris Wilson 21436274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 21446274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2145a24a11e6SChris Wilson } 2146a24a11e6SChris Wilson 21476274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 21486274f212SChris Wilson { 21496274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 21506274f212SChris Wilson struct intel_ring_buffer *signaller; 21516274f212SChris Wilson u32 seqno, ctl; 21526274f212SChris Wilson 21536274f212SChris Wilson ring->hangcheck.deadlock = true; 21546274f212SChris Wilson 21556274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 21566274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 21576274f212SChris Wilson return -1; 21586274f212SChris Wilson 21596274f212SChris Wilson /* cursory check for an unkickable deadlock */ 21606274f212SChris Wilson ctl = I915_READ_CTL(signaller); 21616274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 21626274f212SChris Wilson return -1; 21636274f212SChris Wilson 21646274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 21656274f212SChris Wilson } 21666274f212SChris Wilson 21676274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 21686274f212SChris Wilson { 21696274f212SChris Wilson struct intel_ring_buffer *ring; 21706274f212SChris Wilson int i; 21716274f212SChris Wilson 21726274f212SChris Wilson for_each_ring(ring, dev_priv, i) 21736274f212SChris Wilson ring->hangcheck.deadlock = false; 21746274f212SChris Wilson } 21756274f212SChris Wilson 2176ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 2177ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 21781ec14ad3SChris Wilson { 21791ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 21801ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 21819107e9d2SChris Wilson u32 tmp; 21829107e9d2SChris Wilson 21836274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 2184f2f4d82fSJani Nikula return HANGCHECK_ACTIVE; 21856274f212SChris Wilson 21869107e9d2SChris Wilson if (IS_GEN2(dev)) 2187f2f4d82fSJani Nikula return HANGCHECK_HUNG; 21889107e9d2SChris Wilson 21899107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 21909107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 21919107e9d2SChris Wilson * and break the hang. This should work on 21929107e9d2SChris Wilson * all but the second generation chipsets. 
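	 * The kick itself is the I915_WRITE_CTL() below: the CTL value is
	 * written back with the wait bit still set to poke RB_WAIT, an error
	 * is logged and HANGCHECK_KICK is returned so the caller can bump the
	 * hangcheck score for this ring.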
21939107e9d2SChris Wilson */ 21949107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 21951ec14ad3SChris Wilson if (tmp & RING_WAIT) { 21961ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 21971ec14ad3SChris Wilson ring->name); 219809e14bf3SChris Wilson i915_handle_error(dev, false); 21991ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 2200f2f4d82fSJani Nikula return HANGCHECK_KICK; 22011ec14ad3SChris Wilson } 2202a24a11e6SChris Wilson 22036274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 22046274f212SChris Wilson switch (semaphore_passed(ring)) { 22056274f212SChris Wilson default: 2206f2f4d82fSJani Nikula return HANGCHECK_HUNG; 22076274f212SChris Wilson case 1: 2208a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 2209a24a11e6SChris Wilson ring->name); 221009e14bf3SChris Wilson i915_handle_error(dev, false); 2211a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 2212f2f4d82fSJani Nikula return HANGCHECK_KICK; 22136274f212SChris Wilson case 0: 2214f2f4d82fSJani Nikula return HANGCHECK_WAIT; 22156274f212SChris Wilson } 22169107e9d2SChris Wilson } 22179107e9d2SChris Wilson 2218f2f4d82fSJani Nikula return HANGCHECK_HUNG; 2219a24a11e6SChris Wilson } 2220d1e61e7fSChris Wilson 2221f65d9421SBen Gamari /** 2222f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 222305407ff8SMika Kuoppala * batchbuffers in a long time. We keep track of per-ring seqno progress and 222405407ff8SMika Kuoppala * if there is no progress, the hangcheck score for that ring is increased. 222505407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. In the stuck case 222605407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 222705407ff8SMika Kuoppala * we assume the chip is wedged and try to fix it by resetting the chip. 2228f65d9421SBen Gamari */ 2229a658b5d2SDamien Lespiau static void i915_hangcheck_elapsed(unsigned long data) 2230f65d9421SBen Gamari { 2231f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 2232f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 2233b4519513SChris Wilson struct intel_ring_buffer *ring; 2234b4519513SChris Wilson int i; 223505407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 22369107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 22379107e9d2SChris Wilson #define BUSY 1 22389107e9d2SChris Wilson #define KICK 5 22399107e9d2SChris Wilson #define HUNG 20 22409107e9d2SChris Wilson #define FIRE 30 2241893eead0SChris Wilson 22423e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 22433e0dc6b0SBen Widawsky return; 22443e0dc6b0SBen Widawsky 2245b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 224605407ff8SMika Kuoppala u32 seqno, acthd; 22479107e9d2SChris Wilson bool busy = true; 2248b4519513SChris Wilson 22496274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 22506274f212SChris Wilson 225105407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 225205407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 225305407ff8SMika Kuoppala 225405407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 22559107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 2256da661464SMika Kuoppala ring->hangcheck.action = HANGCHECK_IDLE; 2257da661464SMika Kuoppala 22589107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 22599107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w.
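 * The ring looks idle yet someone is still sleeping on its irq_queue;
 * that usually means a completion interrupt went missing, so wake the
 * waiters and let them re-check the seqno.  test_irq_rings (a debugfs
 * knob) can deliberately force this path, which is what the "Fake
 * missed irq" message below refers to.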
*/ 2260094f9a54SChris Wilson if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2261*f4adcd24SDaniel Vetter if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 22629107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 22639107e9d2SChris Wilson ring->name); 2264*f4adcd24SDaniel Vetter else 2265*f4adcd24SDaniel Vetter DRM_INFO("Fake missed irq on %s\n", 2266*f4adcd24SDaniel Vetter ring->name); 22679107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 2268094f9a54SChris Wilson } 2269094f9a54SChris Wilson /* Safeguard against driver failure */ 2270094f9a54SChris Wilson ring->hangcheck.score += BUSY; 22719107e9d2SChris Wilson } else 22729107e9d2SChris Wilson busy = false; 227305407ff8SMika Kuoppala } else { 22746274f212SChris Wilson /* We always increment the hangcheck score 22756274f212SChris Wilson * if the ring is busy and still processing 22766274f212SChris Wilson * the same request, so that no single request 22776274f212SChris Wilson * can run indefinitely (such as a chain of 22786274f212SChris Wilson * batches). The only time we do not increment 22796274f212SChris Wilson * the hangcheck score on this ring, if this 22806274f212SChris Wilson * ring is in a legitimate wait for another 22816274f212SChris Wilson * ring. In that case the waiting ring is a 22826274f212SChris Wilson * victim and we want to be sure we catch the 22836274f212SChris Wilson * right culprit. Then every time we do kick 22846274f212SChris Wilson * the ring, add a small increment to the 22856274f212SChris Wilson * score so that we can catch a batch that is 22866274f212SChris Wilson * being repeatedly kicked and so responsible 22876274f212SChris Wilson * for stalling the machine. 22889107e9d2SChris Wilson */ 2289ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 2290ad8beaeaSMika Kuoppala acthd); 2291ad8beaeaSMika Kuoppala 2292ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 2293da661464SMika Kuoppala case HANGCHECK_IDLE: 2294f2f4d82fSJani Nikula case HANGCHECK_WAIT: 22956274f212SChris Wilson break; 2296f2f4d82fSJani Nikula case HANGCHECK_ACTIVE: 2297ea04cb31SJani Nikula ring->hangcheck.score += BUSY; 22986274f212SChris Wilson break; 2299f2f4d82fSJani Nikula case HANGCHECK_KICK: 2300ea04cb31SJani Nikula ring->hangcheck.score += KICK; 23016274f212SChris Wilson break; 2302f2f4d82fSJani Nikula case HANGCHECK_HUNG: 2303ea04cb31SJani Nikula ring->hangcheck.score += HUNG; 23046274f212SChris Wilson stuck[i] = true; 23056274f212SChris Wilson break; 23066274f212SChris Wilson } 230705407ff8SMika Kuoppala } 23089107e9d2SChris Wilson } else { 2309da661464SMika Kuoppala ring->hangcheck.action = HANGCHECK_ACTIVE; 2310da661464SMika Kuoppala 23119107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 23129107e9d2SChris Wilson * attempts across multiple batches. 23139107e9d2SChris Wilson */ 23149107e9d2SChris Wilson if (ring->hangcheck.score > 0) 23159107e9d2SChris Wilson ring->hangcheck.score--; 2316cbb465e7SChris Wilson } 2317f65d9421SBen Gamari 231805407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 231905407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 23209107e9d2SChris Wilson busy_count += busy; 232105407ff8SMika Kuoppala } 232205407ff8SMika Kuoppala 232305407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 23249107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 2325b8d88d1dSDaniel Vetter DRM_INFO("%s on %s\n", 232605407ff8SMika Kuoppala stuck[i] ? 
"stuck" : "no progress", 2327a43adf07SChris Wilson ring->name); 2328a43adf07SChris Wilson rings_hung++; 232905407ff8SMika Kuoppala } 233005407ff8SMika Kuoppala } 233105407ff8SMika Kuoppala 233205407ff8SMika Kuoppala if (rings_hung) 233305407ff8SMika Kuoppala return i915_handle_error(dev, true); 233405407ff8SMika Kuoppala 233505407ff8SMika Kuoppala if (busy_count) 233605407ff8SMika Kuoppala /* Reset timer case chip hangs without another request 233705407ff8SMika Kuoppala * being added */ 233810cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 233910cd45b6SMika Kuoppala } 234010cd45b6SMika Kuoppala 234110cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev) 234210cd45b6SMika Kuoppala { 234310cd45b6SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 234410cd45b6SMika Kuoppala if (!i915_enable_hangcheck) 234510cd45b6SMika Kuoppala return; 234610cd45b6SMika Kuoppala 234799584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 234810cd45b6SMika Kuoppala round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2349f65d9421SBen Gamari } 2350f65d9421SBen Gamari 235191738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 235291738a95SPaulo Zanoni { 235391738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 235491738a95SPaulo Zanoni 235591738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 235691738a95SPaulo Zanoni return; 235791738a95SPaulo Zanoni 235891738a95SPaulo Zanoni /* south display irq */ 235991738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 236091738a95SPaulo Zanoni /* 236191738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 236291738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 236391738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 236491738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR. 
236591738a95SPaulo Zanoni */ 236691738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 236791738a95SPaulo Zanoni POSTING_READ(SDEIER); 236891738a95SPaulo Zanoni } 236991738a95SPaulo Zanoni 2370d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev) 2371d18ea1b5SDaniel Vetter { 2372d18ea1b5SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 2373d18ea1b5SDaniel Vetter 2374d18ea1b5SDaniel Vetter /* and GT */ 2375d18ea1b5SDaniel Vetter I915_WRITE(GTIMR, 0xffffffff); 2376d18ea1b5SDaniel Vetter I915_WRITE(GTIER, 0x0); 2377d18ea1b5SDaniel Vetter POSTING_READ(GTIER); 2378d18ea1b5SDaniel Vetter 2379d18ea1b5SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 2380d18ea1b5SDaniel Vetter /* and PM */ 2381d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 2382d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIER, 0x0); 2383d18ea1b5SDaniel Vetter POSTING_READ(GEN6_PMIER); 2384d18ea1b5SDaniel Vetter } 2385d18ea1b5SDaniel Vetter } 2386d18ea1b5SDaniel Vetter 2387c0e09200SDave Airlie /* drm_dma.h hooks 2388c0e09200SDave Airlie */ 2389f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2390036a4a7dSZhenyu Wang { 2391036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2392036a4a7dSZhenyu Wang 23934697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 23944697995bSJesse Barnes 2395036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2396bdfcdb63SDaniel Vetter 2397036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2398036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 23993143a2bfSChris Wilson POSTING_READ(DEIER); 2400036a4a7dSZhenyu Wang 2401d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 2402c650156aSZhenyu Wang 240391738a95SPaulo Zanoni ibx_irq_preinstall(dev); 24047d99163dSBen Widawsky } 24057d99163dSBen Widawsky 24067e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 24077e231dbeSJesse Barnes { 24087e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 24097e231dbeSJesse Barnes int pipe; 24107e231dbeSJesse Barnes 24117e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 24127e231dbeSJesse Barnes 24137e231dbeSJesse Barnes /* VLV magic */ 24147e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 24157e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 24167e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 24177e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 24187e231dbeSJesse Barnes 24197e231dbeSJesse Barnes /* and GT */ 24207e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 24217e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2422d18ea1b5SDaniel Vetter 2423d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 24247e231dbeSJesse Barnes 24257e231dbeSJesse Barnes I915_WRITE(DPINVGTT, 0xff); 24267e231dbeSJesse Barnes 24277e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 24287e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 24297e231dbeSJesse Barnes for_each_pipe(pipe) 24307e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 24317e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 24327e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 24337e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 24347e231dbeSJesse Barnes POSTING_READ(VLV_IER); 24357e231dbeSJesse Barnes } 24367e231dbeSJesse Barnes 243782a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 243882a28bcfSDaniel Vetter { 243982a28bcfSDaniel Vetter 
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 244082a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 244182a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2442fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 244382a28bcfSDaniel Vetter 244482a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2445fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 244682a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2447cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2448fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 244982a28bcfSDaniel Vetter } else { 2450fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 245182a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2452cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2453fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 245482a28bcfSDaniel Vetter } 245582a28bcfSDaniel Vetter 2456fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 245782a28bcfSDaniel Vetter 24587fe0b973SKeith Packard /* 24597fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 24607fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 24617fe0b973SKeith Packard * 24627fe0b973SKeith Packard * This register is the same on all known PCH chips. 24637fe0b973SKeith Packard */ 24647fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 24657fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 24667fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 24677fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 24687fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 24697fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 24707fe0b973SKeith Packard } 24717fe0b973SKeith Packard 2472d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2473d46da437SPaulo Zanoni { 2474d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 247582a28bcfSDaniel Vetter u32 mask; 2476d46da437SPaulo Zanoni 2477692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2478692a04cfSDaniel Vetter return; 2479692a04cfSDaniel Vetter 24808664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 24818664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2482de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 24838664281bSPaulo Zanoni } else { 24848664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 24858664281bSPaulo Zanoni 24868664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 24878664281bSPaulo Zanoni } 2488ab5c608bSBen Widawsky 2489d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2490d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2491d46da437SPaulo Zanoni } 2492d46da437SPaulo Zanoni 24930a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 24940a9a8c91SDaniel Vetter { 24950a9a8c91SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 24960a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 24970a9a8c91SDaniel Vetter 24980a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 24990a9a8c91SDaniel Vetter 
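	/*
	 * The pattern in these postinstall routines is to enable the full set
	 * of sources in the IER register once, and to do all runtime on/off
	 * switching through the cached IMR mask instead.  A rough sketch of
	 * the runtime side (e.g. from the ring irq_get hooks, under irq_lock):
	 *
	 *	dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
	 *	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	 *	POSTING_READ(GTIMR);
	 */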
25000a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 2501040d2baaSBen Widawsky if (HAS_L3_DPF(dev)) { 25020a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. */ 250335a85ac6SBen Widawsky dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 250435a85ac6SBen Widawsky gt_irqs |= GT_PARITY_ERROR(dev); 25050a9a8c91SDaniel Vetter } 25060a9a8c91SDaniel Vetter 25070a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 25080a9a8c91SDaniel Vetter if (IS_GEN5(dev)) { 25090a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 25100a9a8c91SDaniel Vetter ILK_BSD_USER_INTERRUPT; 25110a9a8c91SDaniel Vetter } else { 25120a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 25130a9a8c91SDaniel Vetter } 25140a9a8c91SDaniel Vetter 25150a9a8c91SDaniel Vetter I915_WRITE(GTIIR, I915_READ(GTIIR)); 25160a9a8c91SDaniel Vetter I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 25170a9a8c91SDaniel Vetter I915_WRITE(GTIER, gt_irqs); 25180a9a8c91SDaniel Vetter POSTING_READ(GTIER); 25190a9a8c91SDaniel Vetter 25200a9a8c91SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 25210a9a8c91SDaniel Vetter pm_irqs |= GEN6_PM_RPS_EVENTS; 25220a9a8c91SDaniel Vetter 25230a9a8c91SDaniel Vetter if (HAS_VEBOX(dev)) 25240a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 25250a9a8c91SDaniel Vetter 2526605cd25bSPaulo Zanoni dev_priv->pm_irq_mask = 0xffffffff; 25270a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2528605cd25bSPaulo Zanoni I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 25290a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIER, pm_irqs); 25300a9a8c91SDaniel Vetter POSTING_READ(GEN6_PMIER); 25310a9a8c91SDaniel Vetter } 25320a9a8c91SDaniel Vetter } 25330a9a8c91SDaniel Vetter 2534f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2535036a4a7dSZhenyu Wang { 25364bc9d430SDaniel Vetter unsigned long irqflags; 2537036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 25388e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 25398e76f8dcSPaulo Zanoni 25408e76f8dcSPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) { 25418e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 25428e76f8dcSPaulo Zanoni DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 25438e76f8dcSPaulo Zanoni DE_PLANEB_FLIP_DONE_IVB | 25448e76f8dcSPaulo Zanoni DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 25458e76f8dcSPaulo Zanoni DE_ERR_INT_IVB); 25468e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 25478e76f8dcSPaulo Zanoni DE_PIPEA_VBLANK_IVB); 25488e76f8dcSPaulo Zanoni 25498e76f8dcSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 25508e76f8dcSPaulo Zanoni } else { 25518e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2552ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 25535b3a856bSDaniel Vetter DE_AUX_CHANNEL_A | 25545b3a856bSDaniel Vetter DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 25555b3a856bSDaniel Vetter DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 25565b3a856bSDaniel Vetter DE_POISON); 25578e76f8dcSPaulo Zanoni extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 25588e76f8dcSPaulo Zanoni } 2559036a4a7dSZhenyu Wang 25601ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2561036a4a7dSZhenyu Wang 2562036a4a7dSZhenyu Wang /* should always can generate irq */ 2563036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 25641ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 25658e76f8dcSPaulo 
Zanoni I915_WRITE(DEIER, display_mask | extra_mask); 25663143a2bfSChris Wilson POSTING_READ(DEIER); 2567036a4a7dSZhenyu Wang 25680a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 2569036a4a7dSZhenyu Wang 2570d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 25717fe0b973SKeith Packard 2572f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 25736005ce42SDaniel Vetter /* Enable PCU event interrupts 25746005ce42SDaniel Vetter * 25756005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 25764bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 25774bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 25784bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2579f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 25804bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2581f97108d1SJesse Barnes } 2582f97108d1SJesse Barnes 2583036a4a7dSZhenyu Wang return 0; 2584036a4a7dSZhenyu Wang } 2585036a4a7dSZhenyu Wang 25867e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 25877e231dbeSJesse Barnes { 25887e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 25897e231dbeSJesse Barnes u32 enable_mask; 2590379ef82dSDaniel Vetter u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2591379ef82dSDaniel Vetter PIPE_CRC_DONE_ENABLE; 2592b79480baSDaniel Vetter unsigned long irqflags; 25937e231dbeSJesse Barnes 25947e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 259531acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 259631acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 259731acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 25987e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 25997e231dbeSJesse Barnes 260031acc7f5SJesse Barnes /* 260131acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 260231acc7f5SJesse Barnes * toggle them based on usage. 260331acc7f5SJesse Barnes */ 260431acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 260531acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 260631acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 26077e231dbeSJesse Barnes 260820afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 260920afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 261020afbda2SDaniel Vetter 26117e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 26127e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 26137e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26147e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 26157e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 26167e231dbeSJesse Barnes POSTING_READ(VLV_IER); 26177e231dbeSJesse Barnes 2618b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2619b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 2620b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 262131acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2622515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 262331acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2624b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 262531acc7f5SJesse Barnes 26267e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26277e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26287e231dbeSJesse Barnes 26290a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 26307e231dbeSJesse Barnes 26317e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 26327e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 26337e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 26347e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 26357e231dbeSJesse Barnes #endif 26367e231dbeSJesse Barnes 26377e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 263820afbda2SDaniel Vetter 263920afbda2SDaniel Vetter return 0; 264020afbda2SDaniel Vetter } 264120afbda2SDaniel Vetter 26427e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 26437e231dbeSJesse Barnes { 26447e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26457e231dbeSJesse Barnes int pipe; 26467e231dbeSJesse Barnes 26477e231dbeSJesse Barnes if (!dev_priv) 26487e231dbeSJesse Barnes return; 26497e231dbeSJesse Barnes 2650ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2651ac4c16c5SEgbert Eich 26527e231dbeSJesse Barnes for_each_pipe(pipe) 26537e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 26547e231dbeSJesse Barnes 26557e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 26567e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 26577e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 26587e231dbeSJesse Barnes for_each_pipe(pipe) 26597e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 26607e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26617e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 26627e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 26637e231dbeSJesse Barnes POSTING_READ(VLV_IER); 26647e231dbeSJesse Barnes } 26657e231dbeSJesse Barnes 2666f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2667036a4a7dSZhenyu Wang { 2668036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26694697995bSJesse Barnes 26704697995bSJesse Barnes if (!dev_priv) 26714697995bSJesse Barnes return; 26724697995bSJesse Barnes 2673ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2674ac4c16c5SEgbert Eich 2675036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2676036a4a7dSZhenyu Wang 2677036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2678036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2679036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 26808664281bSPaulo Zanoni if (IS_GEN7(dev)) 26818664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2682036a4a7dSZhenyu Wang 2683036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2684036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2685036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2686192aac1fSKeith Packard 2687ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2688ab5c608bSBen Widawsky return; 2689ab5c608bSBen Widawsky 
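	/*
	 * Everything below pokes south display (PCH) registers, which do not
	 * exist on PCH-NOP platforms, hence the early return above.
	 */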
2690192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2691192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2692192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 26938664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 26948664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2695036a4a7dSZhenyu Wang } 2696036a4a7dSZhenyu Wang 2697c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2698c2798b19SChris Wilson { 2699c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2700c2798b19SChris Wilson int pipe; 2701c2798b19SChris Wilson 2702c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2703c2798b19SChris Wilson 2704c2798b19SChris Wilson for_each_pipe(pipe) 2705c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2706c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2707c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2708c2798b19SChris Wilson POSTING_READ16(IER); 2709c2798b19SChris Wilson } 2710c2798b19SChris Wilson 2711c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 2712c2798b19SChris Wilson { 2713c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2714379ef82dSDaniel Vetter unsigned long irqflags; 2715c2798b19SChris Wilson 2716c2798b19SChris Wilson I915_WRITE16(EMR, 2717c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2718c2798b19SChris Wilson 2719c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2720c2798b19SChris Wilson dev_priv->irq_mask = 2721c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2722c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2723c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2724c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2725c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2726c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 2727c2798b19SChris Wilson 2728c2798b19SChris Wilson I915_WRITE16(IER, 2729c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2730c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2731c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2732c2798b19SChris Wilson I915_USER_INTERRUPT); 2733c2798b19SChris Wilson POSTING_READ16(IER); 2734c2798b19SChris Wilson 2735379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2736379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 2737379ef82dSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2738379ef82dSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE); 2739379ef82dSDaniel Vetter i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE); 2740379ef82dSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2741379ef82dSDaniel Vetter 2742c2798b19SChris Wilson return 0; 2743c2798b19SChris Wilson } 2744c2798b19SChris Wilson 274590a72f87SVille Syrjälä /* 274690a72f87SVille Syrjälä * Returns true when a page flip has completed. 
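 * The caller uses the return value to drop the corresponding flip-pending
 * bit from the IIR mask it keeps polling; see the pipe loop in
 * i8xx_irq_handler() below.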
274790a72f87SVille Syrjälä */ 274890a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 274990a72f87SVille Syrjälä int pipe, u16 iir) 275090a72f87SVille Syrjälä { 275190a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 275290a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 275390a72f87SVille Syrjälä 275490a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 275590a72f87SVille Syrjälä return false; 275690a72f87SVille Syrjälä 275790a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 275890a72f87SVille Syrjälä return false; 275990a72f87SVille Syrjälä 276090a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 276190a72f87SVille Syrjälä 276290a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 276390a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 276490a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 276590a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 276690a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 276790a72f87SVille Syrjälä */ 276890a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 276990a72f87SVille Syrjälä return false; 277090a72f87SVille Syrjälä 277190a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 277290a72f87SVille Syrjälä 277390a72f87SVille Syrjälä return true; 277490a72f87SVille Syrjälä } 277590a72f87SVille Syrjälä 2776ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2777c2798b19SChris Wilson { 2778c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2779c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2780c2798b19SChris Wilson u16 iir, new_iir; 2781c2798b19SChris Wilson u32 pipe_stats[2]; 2782c2798b19SChris Wilson unsigned long irqflags; 2783c2798b19SChris Wilson int pipe; 2784c2798b19SChris Wilson u16 flip_mask = 2785c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2786c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2787c2798b19SChris Wilson 2788c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 2789c2798b19SChris Wilson 2790c2798b19SChris Wilson iir = I915_READ16(IIR); 2791c2798b19SChris Wilson if (iir == 0) 2792c2798b19SChris Wilson return IRQ_NONE; 2793c2798b19SChris Wilson 2794c2798b19SChris Wilson while (iir & ~flip_mask) { 2795c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2796c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 2797c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 2798c2798b19SChris Wilson * interrupts (for non-MSI). 
2799c2798b19SChris Wilson */ 2800c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2801c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2802c2798b19SChris Wilson i915_handle_error(dev, false); 2803c2798b19SChris Wilson 2804c2798b19SChris Wilson for_each_pipe(pipe) { 2805c2798b19SChris Wilson int reg = PIPESTAT(pipe); 2806c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2807c2798b19SChris Wilson 2808c2798b19SChris Wilson /* 2809c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 2810c2798b19SChris Wilson */ 2811c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2812c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2813c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2814c2798b19SChris Wilson pipe_name(pipe)); 2815c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2816c2798b19SChris Wilson } 2817c2798b19SChris Wilson } 2818c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2819c2798b19SChris Wilson 2820c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 2821c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 2822c2798b19SChris Wilson 2823d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 2824c2798b19SChris Wilson 2825c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 2826c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2827c2798b19SChris Wilson 28284356d586SDaniel Vetter for_each_pipe(pipe) { 28294356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 28304356d586SDaniel Vetter i8xx_handle_vblank(dev, pipe, iir)) 28314356d586SDaniel Vetter flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 2832c2798b19SChris Wilson 28334356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2834277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 28354356d586SDaniel Vetter } 2836c2798b19SChris Wilson 2837c2798b19SChris Wilson iir = new_iir; 2838c2798b19SChris Wilson } 2839c2798b19SChris Wilson 2840c2798b19SChris Wilson return IRQ_HANDLED; 2841c2798b19SChris Wilson } 2842c2798b19SChris Wilson 2843c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 2844c2798b19SChris Wilson { 2845c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2846c2798b19SChris Wilson int pipe; 2847c2798b19SChris Wilson 2848c2798b19SChris Wilson for_each_pipe(pipe) { 2849c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 2850c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2851c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2852c2798b19SChris Wilson } 2853c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2854c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2855c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 2856c2798b19SChris Wilson } 2857c2798b19SChris Wilson 2858a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 2859a266c7d5SChris Wilson { 2860a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2861a266c7d5SChris Wilson int pipe; 2862a266c7d5SChris Wilson 2863a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2864a266c7d5SChris Wilson 2865a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2866a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2867a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2868a266c7d5SChris Wilson } 2869a266c7d5SChris Wilson 
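	/*
	 * Mask and clear everything else up front so nothing fires between
	 * request_irq() and the postinstall hook re-enabling the sources we
	 * actually want.
	 */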
287000d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xeffe); 2871a266c7d5SChris Wilson for_each_pipe(pipe) 2872a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2873a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2874a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2875a266c7d5SChris Wilson POSTING_READ(IER); 2876a266c7d5SChris Wilson } 2877a266c7d5SChris Wilson 2878a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 2879a266c7d5SChris Wilson { 2880a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 288138bde180SChris Wilson u32 enable_mask; 2882379ef82dSDaniel Vetter unsigned long irqflags; 2883a266c7d5SChris Wilson 288438bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 288538bde180SChris Wilson 288638bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 288738bde180SChris Wilson dev_priv->irq_mask = 288838bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 288938bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 289038bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 289138bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 289238bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 289338bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 289438bde180SChris Wilson 289538bde180SChris Wilson enable_mask = 289638bde180SChris Wilson I915_ASLE_INTERRUPT | 289738bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 289838bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 289938bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 290038bde180SChris Wilson I915_USER_INTERRUPT; 290138bde180SChris Wilson 2902a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 290320afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 290420afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 290520afbda2SDaniel Vetter 2906a266c7d5SChris Wilson /* Enable in IER... */ 2907a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2908a266c7d5SChris Wilson /* and unmask in IMR */ 2909a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2910a266c7d5SChris Wilson } 2911a266c7d5SChris Wilson 2912a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2913a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2914a266c7d5SChris Wilson POSTING_READ(IER); 2915a266c7d5SChris Wilson 2916f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 291720afbda2SDaniel Vetter 2918379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2919379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 2920379ef82dSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2921379ef82dSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE); 2922379ef82dSDaniel Vetter i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE); 2923379ef82dSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2924379ef82dSDaniel Vetter 292520afbda2SDaniel Vetter return 0; 292620afbda2SDaniel Vetter } 292720afbda2SDaniel Vetter 292890a72f87SVille Syrjälä /* 292990a72f87SVille Syrjälä * Returns true when a page flip has completed. 
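 * Takes both @plane and @pipe because the caller may swap them on mobile
 * gen2/gen3 parts (plane A scanned out by pipe B and vice versa): the
 * flip-pending bit is tracked per plane while the vblank is per pipe.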
293090a72f87SVille Syrjälä */ 293190a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 293290a72f87SVille Syrjälä int plane, int pipe, u32 iir) 293390a72f87SVille Syrjälä { 293490a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 293590a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 293690a72f87SVille Syrjälä 293790a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 293890a72f87SVille Syrjälä return false; 293990a72f87SVille Syrjälä 294090a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 294190a72f87SVille Syrjälä return false; 294290a72f87SVille Syrjälä 294390a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 294490a72f87SVille Syrjälä 294590a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 294690a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 294790a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 294890a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 294990a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 295090a72f87SVille Syrjälä */ 295190a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 295290a72f87SVille Syrjälä return false; 295390a72f87SVille Syrjälä 295490a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 295590a72f87SVille Syrjälä 295690a72f87SVille Syrjälä return true; 295790a72f87SVille Syrjälä } 295890a72f87SVille Syrjälä 2959ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 2960a266c7d5SChris Wilson { 2961a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2962a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 29638291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2964a266c7d5SChris Wilson unsigned long irqflags; 296538bde180SChris Wilson u32 flip_mask = 296638bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 296738bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 296838bde180SChris Wilson int pipe, ret = IRQ_NONE; 2969a266c7d5SChris Wilson 2970a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2971a266c7d5SChris Wilson 2972a266c7d5SChris Wilson iir = I915_READ(IIR); 297338bde180SChris Wilson do { 297438bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 29758291ee90SChris Wilson bool blc_event = false; 2976a266c7d5SChris Wilson 2977a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2978a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2979a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2980a266c7d5SChris Wilson * interrupts (for non-MSI). 
2981a266c7d5SChris Wilson */ 2982a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2983a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2984a266c7d5SChris Wilson i915_handle_error(dev, false); 2985a266c7d5SChris Wilson 2986a266c7d5SChris Wilson for_each_pipe(pipe) { 2987a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2988a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2989a266c7d5SChris Wilson 299038bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 2991a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2992a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2993a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2994a266c7d5SChris Wilson pipe_name(pipe)); 2995a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 299638bde180SChris Wilson irq_received = true; 2997a266c7d5SChris Wilson } 2998a266c7d5SChris Wilson } 2999a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3000a266c7d5SChris Wilson 3001a266c7d5SChris Wilson if (!irq_received) 3002a266c7d5SChris Wilson break; 3003a266c7d5SChris Wilson 3004a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 3005a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 3006a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3007a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3008b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3009a266c7d5SChris Wilson 3010a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3011a266c7d5SChris Wilson hotplug_status); 301291d131d2SDaniel Vetter 301310a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 301491d131d2SDaniel Vetter 3015a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 301638bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 3017a266c7d5SChris Wilson } 3018a266c7d5SChris Wilson 301938bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 3020a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3021a266c7d5SChris Wilson 3022a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3023a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3024a266c7d5SChris Wilson 3025a266c7d5SChris Wilson for_each_pipe(pipe) { 302638bde180SChris Wilson int plane = pipe; 302738bde180SChris Wilson if (IS_MOBILE(dev)) 302838bde180SChris Wilson plane = !plane; 30295e2032d4SVille Syrjälä 303090a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 303190a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 303290a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3033a266c7d5SChris Wilson 3034a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3035a266c7d5SChris Wilson blc_event = true; 30364356d586SDaniel Vetter 30374356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3038277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 3039a266c7d5SChris Wilson } 3040a266c7d5SChris Wilson 3041a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3042a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3043a266c7d5SChris Wilson 3044a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3045a266c7d5SChris Wilson * transitions from zero to nonzero. 
If another bit got 3046a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3047a266c7d5SChris Wilson * we would never get another interrupt. 3048a266c7d5SChris Wilson * 3049a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3050a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3051a266c7d5SChris Wilson * another one. 3052a266c7d5SChris Wilson * 3053a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3054a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3055a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3056a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3057a266c7d5SChris Wilson * stray interrupts. 3058a266c7d5SChris Wilson */ 305938bde180SChris Wilson ret = IRQ_HANDLED; 3060a266c7d5SChris Wilson iir = new_iir; 306138bde180SChris Wilson } while (iir & ~flip_mask); 3062a266c7d5SChris Wilson 3063d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 30648291ee90SChris Wilson 3065a266c7d5SChris Wilson return ret; 3066a266c7d5SChris Wilson } 3067a266c7d5SChris Wilson 3068a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 3069a266c7d5SChris Wilson { 3070a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3071a266c7d5SChris Wilson int pipe; 3072a266c7d5SChris Wilson 3073ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3074ac4c16c5SEgbert Eich 3075a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3076a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3077a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3078a266c7d5SChris Wilson } 3079a266c7d5SChris Wilson 308000d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 308155b39755SChris Wilson for_each_pipe(pipe) { 308255b39755SChris Wilson /* Clear enable bits; then clear status bits */ 3083a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 308455b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 308555b39755SChris Wilson } 3086a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3087a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3088a266c7d5SChris Wilson 3089a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3090a266c7d5SChris Wilson } 3091a266c7d5SChris Wilson 3092a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 3093a266c7d5SChris Wilson { 3094a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3095a266c7d5SChris Wilson int pipe; 3096a266c7d5SChris Wilson 3097a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3098a266c7d5SChris Wilson 3099a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3100a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3101a266c7d5SChris Wilson 3102a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 3103a266c7d5SChris Wilson for_each_pipe(pipe) 3104a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3105a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3106a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3107a266c7d5SChris Wilson POSTING_READ(IER); 3108a266c7d5SChris Wilson } 3109a266c7d5SChris Wilson 3110a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 3111a266c7d5SChris Wilson { 3112a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3113bbba0a97SChris Wilson u32 enable_mask; 
3114a266c7d5SChris Wilson u32 error_mask; 3115b79480baSDaniel Vetter unsigned long irqflags; 3116a266c7d5SChris Wilson 3117a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. */ 3118bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3119adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 3120bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3121bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3122bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3123bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3124bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3125bbba0a97SChris Wilson 3126bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 312721ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 312821ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3129bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 3130bbba0a97SChris Wilson 3131bbba0a97SChris Wilson if (IS_G4X(dev)) 3132bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 3133a266c7d5SChris Wilson 3134b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3135b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. */ 3136b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3137515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3138379ef82dSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE); 3139379ef82dSDaniel Vetter i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE); 3140b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3141a266c7d5SChris Wilson 3142a266c7d5SChris Wilson /* 3143a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 3144a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 
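 * A bit set in EMR masks the corresponding error report, hence the
 * bitwise NOT of the errors we do want to see.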
3145a266c7d5SChris Wilson */ 3146a266c7d5SChris Wilson if (IS_G4X(dev)) { 3147a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 3148a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 3149a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 3150a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 3151a266c7d5SChris Wilson } else { 3152a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 3153a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 3154a266c7d5SChris Wilson } 3155a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 3156a266c7d5SChris Wilson 3157a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 3158a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 3159a266c7d5SChris Wilson POSTING_READ(IER); 3160a266c7d5SChris Wilson 316120afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 316220afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 316320afbda2SDaniel Vetter 3164f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 316520afbda2SDaniel Vetter 316620afbda2SDaniel Vetter return 0; 316720afbda2SDaniel Vetter } 316820afbda2SDaniel Vetter 3169bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 317020afbda2SDaniel Vetter { 317120afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3172e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3173cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 317420afbda2SDaniel Vetter u32 hotplug_en; 317520afbda2SDaniel Vetter 3176b5ea2d56SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 3177b5ea2d56SDaniel Vetter 3178bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 3179bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3180bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3181adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 3182e5868a31SEgbert Eich /* enable bits are the same for all generations */ 3183cd569aedSEgbert Eich list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3184cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3185cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3186a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 3187a266c7d5SChris Wilson to generate a spurious hotplug event about three 3188a266c7d5SChris Wilson seconds later. So just do it once. 
3189a266c7d5SChris Wilson */ 3190a266c7d5SChris Wilson if (IS_G4X(dev)) 3191a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 319285fc95baSDaniel Vetter hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3193a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3194a266c7d5SChris Wilson 3195a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 3196a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3197a266c7d5SChris Wilson } 3198bac56d5bSEgbert Eich } 3199a266c7d5SChris Wilson 3200ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 3201a266c7d5SChris Wilson { 3202a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3203a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3204a266c7d5SChris Wilson u32 iir, new_iir; 3205a266c7d5SChris Wilson u32 pipe_stats[I915_MAX_PIPES]; 3206a266c7d5SChris Wilson unsigned long irqflags; 3207a266c7d5SChris Wilson int irq_received; 3208a266c7d5SChris Wilson int ret = IRQ_NONE, pipe; 320921ad8330SVille Syrjälä u32 flip_mask = 321021ad8330SVille Syrjälä I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 321121ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3212a266c7d5SChris Wilson 3213a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 3214a266c7d5SChris Wilson 3215a266c7d5SChris Wilson iir = I915_READ(IIR); 3216a266c7d5SChris Wilson 3217a266c7d5SChris Wilson for (;;) { 32182c8ba29fSChris Wilson bool blc_event = false; 32192c8ba29fSChris Wilson 322021ad8330SVille Syrjälä irq_received = (iir & ~flip_mask) != 0; 3221a266c7d5SChris Wilson 3222a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3223a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 3224a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 3225a266c7d5SChris Wilson * interrupts (for non-MSI). 3226a266c7d5SChris Wilson */ 3227a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3228a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3229a266c7d5SChris Wilson i915_handle_error(dev, false); 3230a266c7d5SChris Wilson 3231a266c7d5SChris Wilson for_each_pipe(pipe) { 3232a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 3233a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3234a266c7d5SChris Wilson 3235a266c7d5SChris Wilson /* 3236a266c7d5SChris Wilson * Clear the PIPE*STAT regs before the IIR 3237a266c7d5SChris Wilson */ 3238a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3239a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3240a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3241a266c7d5SChris Wilson pipe_name(pipe)); 3242a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 3243a266c7d5SChris Wilson irq_received = 1; 3244a266c7d5SChris Wilson } 3245a266c7d5SChris Wilson } 3246a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3247a266c7d5SChris Wilson 3248a266c7d5SChris Wilson if (!irq_received) 3249a266c7d5SChris Wilson break; 3250a266c7d5SChris Wilson 3251a266c7d5SChris Wilson ret = IRQ_HANDLED; 3252a266c7d5SChris Wilson 3253a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 3254adca4730SChris Wilson if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3255a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3256b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 
3257b543fb04SEgbert Eich HOTPLUG_INT_STATUS_G4X : 32584f7fd709SDaniel Vetter HOTPLUG_INT_STATUS_I915); 3259a266c7d5SChris Wilson 3260a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3261a266c7d5SChris Wilson hotplug_status); 326291d131d2SDaniel Vetter 326310a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, 326410a504deSDaniel Vetter IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); 326591d131d2SDaniel Vetter 3266a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3267a266c7d5SChris Wilson I915_READ(PORT_HOTPLUG_STAT); 3268a266c7d5SChris Wilson } 3269a266c7d5SChris Wilson 327021ad8330SVille Syrjälä I915_WRITE(IIR, iir & ~flip_mask); 3271a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3272a266c7d5SChris Wilson 3273a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3274a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3275a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 3276a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[VCS]); 3277a266c7d5SChris Wilson 3278a266c7d5SChris Wilson for_each_pipe(pipe) { 32792c8ba29fSChris Wilson if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 328090a72f87SVille Syrjälä i915_handle_vblank(dev, pipe, pipe, iir)) 328190a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3282a266c7d5SChris Wilson 3283a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3284a266c7d5SChris Wilson blc_event = true; 32854356d586SDaniel Vetter 32864356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3287277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 3288a266c7d5SChris Wilson } 3289a266c7d5SChris Wilson 3290a266c7d5SChris Wilson 3291a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3292a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3293a266c7d5SChris Wilson 3294515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3295515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 3296515ac2bbSDaniel Vetter 3297a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3298a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3299a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3300a266c7d5SChris Wilson * we would never get another interrupt. 3301a266c7d5SChris Wilson * 3302a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3303a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3304a266c7d5SChris Wilson * another one. 3305a266c7d5SChris Wilson * 3306a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3307a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3308a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3309a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3310a266c7d5SChris Wilson * stray interrupts. 
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
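/*
 * intel_irq_init - set up the driver's interrupt and vblank hooks.
 *
 * Initializes the deferred-work items and timers used by the interrupt
 * code, picks the hardware frame counter implementation (none on gen2,
 * full 32 bit on G4X and gen5+, 24 bit otherwise) and installs the
 * platform-specific irq_handler/preinstall/postinstall/uninstall,
 * vblank and hpd_irq_setup callbacks.
 */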
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
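/*
 * intel_hpd_init - (re)initialize hotplug detection.
 *
 * Resets the per-pin hotplug state, derives each connector's polling
 * mode, and then invokes the platform hpd_irq_setup hook under irq_lock
 * so that the assert_spin_locked checks in the setup paths are
 * satisfied.
 */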
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
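/*
 * Package C8+ interrupt handling: hsw_pc8_disable_interrupts() saves the
 * current DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR values in pc8.regsave,
 * masks everything except the PCH hotplug events and flags
 * pc8.irqs_disabled; hsw_pc8_restore_interrupts() WARNs if the registers
 * are no longer in that masked state and then re-enables the interrupts
 * that were saved.
 */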
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
	ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val, expected;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	expected = ~DE_PCH_EVENT_IVB;
	WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
	expected = ~SDE_HOTPLUG_MASK_CPT;
	WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
	     val, expected);

	val = I915_READ(GTIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);

	val = I915_READ(GEN6_PMIMR);
	expected = 0xffffffff;
	WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
	     expected);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv,
				     ~dev_priv->pc8.regsave.sdeimr &
				     ~SDE_HOTPLUG_MASK_CPT);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
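/*
 * Illustrative only -- a sketch of how these two helpers are meant to be
 * paired by their callers (the actual Package C8 enable/disable call
 * sites live outside this file):
 *
 *	hsw_pc8_disable_interrupts(dev);
 *	... hardware may now enter Package C8+ ...
 *	hsw_pc8_restore_interrupts(dev);
 */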