/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
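		/*
		 * DEIMR is a mask register: a set bit blocks delivery of the
		 * corresponding interrupt, so enabling here means clearing
		 * bits. The posting read below makes sure the write has
		 * reached the hardware before we return.
		 */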
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

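	/*
	 * Note: while pc8.irqs_disabled is set, only the saved register image
	 * in pc8.regsave is updated below; the assumption (not shown in this
	 * excerpt) is that the saved value is written back to GEN6_PMIMR when
	 * the driver re-enables interrupts on PC8 exit.
	 */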
	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

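/*
 * The two helpers above exist because IVB/HSW (and CPT/PPT on the PCH side)
 * have a single error-interrupt enable bit shared by all pipes respectively
 * transcoders, so it may only be turned on when no pipe (or transcoder) has
 * underrun reporting disabled; see the kerneldoc of
 * intel_set_cpu_fifo_underrun_reporting() further down.
 */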
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

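/*
 * Illustrative usage sketch (not taken from this file): a caller that knows
 * pipe A will briefly underrun during a reconfiguration could do
 *
 *	old = intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false);
 *	... reprogram the pipe ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, old);
 *
 * restoring the returned previous state so that nested callers compose.
 */
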
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


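/*
 * PIPESTAT registers pack the interrupt enable bits in the upper half and
 * the corresponding status bits in the lower half, which is why the helpers
 * below preserve only 0x7fff0000 on the read-modify-write and set
 * mask | (mask >> 16) to enable an event and clear its pending status in
 * one go.
 */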
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder =
			intel_pipe_to_cpu_transcoder(dev_priv, pipe);
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

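/*
 * intel_pipe_in_vblank() samples the live interrupt status register
 * appropriate for the platform and reports whether the pipe's vblank
 * condition is currently asserted; the scanout position code below uses it
 * to fix up the scanline counter around the vblank edges.
 */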
static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;

	if (IS_VALLEYVIEW(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		return I915_READ(VLV_ISR) & status;
	} else if (IS_GEN2(dev)) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		return I915_READ16(ISR) & status;
	} else if (INTEL_INFO(dev)->gen < 5) {
		status = pipe == PIPE_A ?
			I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
			I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

		return I915_READ(ISR) & status;
	} else if (INTEL_INFO(dev)->gen < 7) {
		status = pipe == PIPE_A ?
			DE_PIPEA_VBLANK :
			DE_PIPEB_VBLANK;

		return I915_READ(DEISR) & status;
	} else {
		switch (pipe) {
		default:
		case PIPE_A:
			status = DE_PIPEA_VBLANK_IVB;
			break;
		case PIPE_B:
			status = DE_PIPEB_VBLANK_IVB;
			break;
		case PIPE_C:
			status = DE_PIPEC_VBLANK_IVB;
			break;
		}

		return I915_READ(DEISR) & status;
	}
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		/*
		 * The scanline counter increments at the leading edge
		 * of hsync, ie. it completely misses the active portion
		 * of the line. Fix up the counter at both edges of vblank
		 * to get a more accurate picture whether we're in vblank
		 * or not.
		 */
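		/*
		 * Example with hypothetical numbers: for vbl_start = 1084 and
		 * vbl_end = vtotal = 1125, a scanline reading of 1083 while
		 * the hardware already reports vblank is bumped to 1084, and
		 * a reading of 1124 when vblank is already over wraps to 0.
		 */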
		in_vbl = intel_pipe_in_vblank(dev, pipe);
		if ((in_vbl && position == vbl_start - 1) ||
		    (!in_vbl && position == vbl_end - 1))
			position = (position + 1) % vtotal;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

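	/*
	 * Worked example for the convention established above (hypothetical
	 * 1125-line mode, vbl_start = 1084, vbl_end = vtotal = 1125, in line
	 * units): a raw position of 1100 lies in vblank and becomes
	 * 1100 - 1125 = -25, counting up to 0 at vblank end; a raw position
	 * of 500 stays 500 + (1125 - 1125) = 500, counting up from vblank end.
	 */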
	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

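/*
 * RPS (render P-state) work item: each up/down-threshold interrupt from the
 * PM unit nudges the GPU frequency, and consecutive adjustments in the same
 * direction double the step size (the adj *= 2 below), so sustained load
 * ramps the clock quickly while isolated events move it by a single step.
 */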
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay < (int)dev_priv->rps.min_delay)
		new_delay = dev_priv->rps.min_delay;
	if (new_delay > (int)dev_priv->rps.max_delay)
		new_delay = dev_priv->rps.max_delay;
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
				   KOBJ_CHANGE, parity_event);

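		/*
		 * Userspace observes this as a uevent on the drm device with
		 * the environment assembled above (the L3 parity flag plus
		 * ROW/BANK/SUBBANK/SLICE values), which should be enough
		 * information to remap the failing row.
		 */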
		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
interrupt 0x%08x\n", gt_iir); 1132e7b4c6b1SDaniel Vetter i915_handle_error(dev, false); 1133e7b4c6b1SDaniel Vetter } 1134e3689190SBen Widawsky 113535a85ac6SBen Widawsky if (gt_iir & GT_PARITY_ERROR(dev)) 113635a85ac6SBen Widawsky ivybridge_parity_error_irq_handler(dev, gt_iir); 1137e7b4c6b1SDaniel Vetter } 1138e7b4c6b1SDaniel Vetter 1139abd58f01SBen Widawsky static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1140abd58f01SBen Widawsky struct drm_i915_private *dev_priv, 1141abd58f01SBen Widawsky u32 master_ctl) 1142abd58f01SBen Widawsky { 1143abd58f01SBen Widawsky u32 rcs, bcs, vcs; 1144abd58f01SBen Widawsky uint32_t tmp = 0; 1145abd58f01SBen Widawsky irqreturn_t ret = IRQ_NONE; 1146abd58f01SBen Widawsky 1147abd58f01SBen Widawsky if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1148abd58f01SBen Widawsky tmp = I915_READ(GEN8_GT_IIR(0)); 1149abd58f01SBen Widawsky if (tmp) { 1150abd58f01SBen Widawsky ret = IRQ_HANDLED; 1151abd58f01SBen Widawsky rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1152abd58f01SBen Widawsky bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1153abd58f01SBen Widawsky if (rcs & GT_RENDER_USER_INTERRUPT) 1154abd58f01SBen Widawsky notify_ring(dev, &dev_priv->ring[RCS]); 1155abd58f01SBen Widawsky if (bcs & GT_RENDER_USER_INTERRUPT) 1156abd58f01SBen Widawsky notify_ring(dev, &dev_priv->ring[BCS]); 1157abd58f01SBen Widawsky I915_WRITE(GEN8_GT_IIR(0), tmp); 1158abd58f01SBen Widawsky } else 1159abd58f01SBen Widawsky DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1160abd58f01SBen Widawsky } 1161abd58f01SBen Widawsky 1162abd58f01SBen Widawsky if (master_ctl & GEN8_GT_VCS1_IRQ) { 1163abd58f01SBen Widawsky tmp = I915_READ(GEN8_GT_IIR(1)); 1164abd58f01SBen Widawsky if (tmp) { 1165abd58f01SBen Widawsky ret = IRQ_HANDLED; 1166abd58f01SBen Widawsky vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1167abd58f01SBen Widawsky if (vcs & GT_RENDER_USER_INTERRUPT) 1168abd58f01SBen Widawsky notify_ring(dev, &dev_priv->ring[VCS]); 1169abd58f01SBen Widawsky I915_WRITE(GEN8_GT_IIR(1), tmp); 1170abd58f01SBen Widawsky } else 1171abd58f01SBen Widawsky DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1172abd58f01SBen Widawsky } 1173abd58f01SBen Widawsky 1174abd58f01SBen Widawsky if (master_ctl & GEN8_GT_VECS_IRQ) { 1175abd58f01SBen Widawsky tmp = I915_READ(GEN8_GT_IIR(3)); 1176abd58f01SBen Widawsky if (tmp) { 1177abd58f01SBen Widawsky ret = IRQ_HANDLED; 1178abd58f01SBen Widawsky vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1179abd58f01SBen Widawsky if (vcs & GT_RENDER_USER_INTERRUPT) 1180abd58f01SBen Widawsky notify_ring(dev, &dev_priv->ring[VECS]); 1181abd58f01SBen Widawsky I915_WRITE(GEN8_GT_IIR(3), tmp); 1182abd58f01SBen Widawsky } else 1183abd58f01SBen Widawsky DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1184abd58f01SBen Widawsky } 1185abd58f01SBen Widawsky 1186abd58f01SBen Widawsky return ret; 1187abd58f01SBen Widawsky } 1188abd58f01SBen Widawsky 1189b543fb04SEgbert Eich #define HPD_STORM_DETECT_PERIOD 1000 1190b543fb04SEgbert Eich #define HPD_STORM_THRESHOLD 5 1191b543fb04SEgbert Eich 119210a504deSDaniel Vetter static inline void intel_hpd_irq_handler(struct drm_device *dev, 1193b543fb04SEgbert Eich u32 hotplug_trigger, 1194b543fb04SEgbert Eich const u32 *hpd) 1195b543fb04SEgbert Eich { 1196b543fb04SEgbert Eich drm_i915_private_t *dev_priv = dev->dev_private; 1197b543fb04SEgbert Eich int i; 119810a504deSDaniel Vetter bool storm_detected = false; 1199b543fb04SEgbert Eich 120091d131d2SDaniel Vetter if (!hotplug_trigger) 120191d131d2SDaniel Vetter return; 120291d131d2SDaniel Vetter 1203b5ea2d56SDaniel 
Vetter spin_lock(&dev_priv->irq_lock); 1204b543fb04SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 1205821450c6SEgbert Eich 1206b8f102e8SEgbert Eich WARN(((hpd[i] & hotplug_trigger) && 1207b8f102e8SEgbert Eich dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), 1208b8f102e8SEgbert Eich "Received HPD interrupt although disabled\n"); 1209b8f102e8SEgbert Eich 1210b543fb04SEgbert Eich if (!(hpd[i] & hotplug_trigger) || 1211b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1212b543fb04SEgbert Eich continue; 1213b543fb04SEgbert Eich 1214bc5ead8cSJani Nikula dev_priv->hpd_event_bits |= (1 << i); 1215b543fb04SEgbert Eich if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1216b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_last_jiffies 1217b543fb04SEgbert Eich + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1218b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1219b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 1220b8f102e8SEgbert Eich DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1221b543fb04SEgbert Eich } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1222b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1223142e2398SEgbert Eich dev_priv->hpd_event_bits &= ~(1 << i); 1224b543fb04SEgbert Eich DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 122510a504deSDaniel Vetter storm_detected = true; 1226b543fb04SEgbert Eich } else { 1227b543fb04SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt++; 1228b8f102e8SEgbert Eich DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1229b8f102e8SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt); 1230b543fb04SEgbert Eich } 1231b543fb04SEgbert Eich } 1232b543fb04SEgbert Eich 123310a504deSDaniel Vetter if (storm_detected) 123410a504deSDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 1235b5ea2d56SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 12365876fa0dSDaniel Vetter 1237645416f5SDaniel Vetter /* 1238645416f5SDaniel Vetter * Our hotplug handler can grab modeset locks (by calling down into the 1239645416f5SDaniel Vetter * fb helpers). Hence it must not be run on our own dev-priv->wq work 1240645416f5SDaniel Vetter * queue for otherwise the flush_work in the pageflip code will 1241645416f5SDaniel Vetter * deadlock. 
1242645416f5SDaniel Vetter */ 1243645416f5SDaniel Vetter schedule_work(&dev_priv->hotplug_work); 1244b543fb04SEgbert Eich } 1245b543fb04SEgbert Eich 1246515ac2bbSDaniel Vetter static void gmbus_irq_handler(struct drm_device *dev) 1247515ac2bbSDaniel Vetter { 124828c70f16SDaniel Vetter struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 124928c70f16SDaniel Vetter 125028c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1251515ac2bbSDaniel Vetter } 1252515ac2bbSDaniel Vetter 1253ce99c256SDaniel Vetter static void dp_aux_irq_handler(struct drm_device *dev) 1254ce99c256SDaniel Vetter { 12559ee32feaSDaniel Vetter struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 12569ee32feaSDaniel Vetter 12579ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1258ce99c256SDaniel Vetter } 1259ce99c256SDaniel Vetter 12608bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS) 1261277de95eSDaniel Vetter static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1262eba94eb9SDaniel Vetter uint32_t crc0, uint32_t crc1, 1263eba94eb9SDaniel Vetter uint32_t crc2, uint32_t crc3, 12648bc5e955SDaniel Vetter uint32_t crc4) 12658bf1e9f1SShuang He { 12668bf1e9f1SShuang He struct drm_i915_private *dev_priv = dev->dev_private; 12678bf1e9f1SShuang He struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 12688bf1e9f1SShuang He struct intel_pipe_crc_entry *entry; 1269ac2300d4SDamien Lespiau int head, tail; 1270b2c88f5bSDamien Lespiau 1271d538bbdfSDamien Lespiau spin_lock(&pipe_crc->lock); 1272d538bbdfSDamien Lespiau 12730c912c79SDamien Lespiau if (!pipe_crc->entries) { 1274d538bbdfSDamien Lespiau spin_unlock(&pipe_crc->lock); 12750c912c79SDamien Lespiau DRM_ERROR("spurious interrupt\n"); 12760c912c79SDamien Lespiau return; 12770c912c79SDamien Lespiau } 12780c912c79SDamien Lespiau 1279d538bbdfSDamien Lespiau head = pipe_crc->head; 1280d538bbdfSDamien Lespiau tail = pipe_crc->tail; 1281b2c88f5bSDamien Lespiau 1282b2c88f5bSDamien Lespiau if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1283d538bbdfSDamien Lespiau spin_unlock(&pipe_crc->lock); 1284b2c88f5bSDamien Lespiau DRM_ERROR("CRC buffer overflowing\n"); 1285b2c88f5bSDamien Lespiau return; 1286b2c88f5bSDamien Lespiau } 1287b2c88f5bSDamien Lespiau 1288b2c88f5bSDamien Lespiau entry = &pipe_crc->entries[head]; 12898bf1e9f1SShuang He 12908bc5e955SDaniel Vetter entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1291eba94eb9SDaniel Vetter entry->crc[0] = crc0; 1292eba94eb9SDaniel Vetter entry->crc[1] = crc1; 1293eba94eb9SDaniel Vetter entry->crc[2] = crc2; 1294eba94eb9SDaniel Vetter entry->crc[3] = crc3; 1295eba94eb9SDaniel Vetter entry->crc[4] = crc4; 1296b2c88f5bSDamien Lespiau 1297b2c88f5bSDamien Lespiau head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1298d538bbdfSDamien Lespiau pipe_crc->head = head; 1299d538bbdfSDamien Lespiau 1300d538bbdfSDamien Lespiau spin_unlock(&pipe_crc->lock); 130107144428SDamien Lespiau 130207144428SDamien Lespiau wake_up_interruptible(&pipe_crc->wq); 13038bf1e9f1SShuang He } 1304277de95eSDaniel Vetter #else 1305277de95eSDaniel Vetter static inline void 1306277de95eSDaniel Vetter display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1307277de95eSDaniel Vetter uint32_t crc0, uint32_t crc1, 1308277de95eSDaniel Vetter uint32_t crc2, uint32_t crc3, 1309277de95eSDaniel Vetter uint32_t crc4) {} 1310277de95eSDaniel Vetter #endif 1311eba94eb9SDaniel Vetter 1312277de95eSDaniel Vetter 1313277de95eSDaniel Vetter static void 
hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 13145a69b89fSDaniel Vetter { 13155a69b89fSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 13165a69b89fSDaniel Vetter 1317277de95eSDaniel Vetter display_pipe_crc_irq_handler(dev, pipe, 13185a69b89fSDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 13195a69b89fSDaniel Vetter 0, 0, 0, 0); 13205a69b89fSDaniel Vetter } 13215a69b89fSDaniel Vetter 1322277de95eSDaniel Vetter static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1323eba94eb9SDaniel Vetter { 1324eba94eb9SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 1325eba94eb9SDaniel Vetter 1326277de95eSDaniel Vetter display_pipe_crc_irq_handler(dev, pipe, 1327eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1328eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1329eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1330eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 13318bc5e955SDaniel Vetter I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1332eba94eb9SDaniel Vetter } 13335b3a856bSDaniel Vetter 1334277de95eSDaniel Vetter static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 13355b3a856bSDaniel Vetter { 13365b3a856bSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 13370b5c5ed0SDaniel Vetter uint32_t res1, res2; 13380b5c5ed0SDaniel Vetter 13390b5c5ed0SDaniel Vetter if (INTEL_INFO(dev)->gen >= 3) 13400b5c5ed0SDaniel Vetter res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 13410b5c5ed0SDaniel Vetter else 13420b5c5ed0SDaniel Vetter res1 = 0; 13430b5c5ed0SDaniel Vetter 13440b5c5ed0SDaniel Vetter if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 13450b5c5ed0SDaniel Vetter res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 13460b5c5ed0SDaniel Vetter else 13470b5c5ed0SDaniel Vetter res2 = 0; 13485b3a856bSDaniel Vetter 1349277de95eSDaniel Vetter display_pipe_crc_irq_handler(dev, pipe, 13500b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_RED(pipe)), 13510b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_GREEN(pipe)), 13520b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_BLUE(pipe)), 13530b5c5ed0SDaniel Vetter res1, res2); 13545b3a856bSDaniel Vetter } 13558bf1e9f1SShuang He 13561403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 13571403c0d4SPaulo Zanoni * IMR bits until the work is done. Other interrupts can be processed without 13581403c0d4SPaulo Zanoni * the work queue. 
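 *
 * Concretely: the RPS bits are accumulated into dev_priv->rps.pm_iir under
 * irq_lock, masked out of the PM IMR via snb_disable_pm_irq(), and
 * dev_priv->rps.work is queued to pick them up once forcewake can be taken.
 * The VEBOX user and CS-error interrupts (on parts with a VEBOX) are cheap
 * enough to be handled directly below.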
*/ 13591403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1360baf02a1fSBen Widawsky { 136141a05a3aSDaniel Vetter if (pm_iir & GEN6_PM_RPS_EVENTS) { 136259cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 13634848405cSBen Widawsky dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 13644d3b3d5fSPaulo Zanoni snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 136559cdb63dSDaniel Vetter spin_unlock(&dev_priv->irq_lock); 13662adbee62SDaniel Vetter 13672adbee62SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->rps.work); 136841a05a3aSDaniel Vetter } 1369baf02a1fSBen Widawsky 13701403c0d4SPaulo Zanoni if (HAS_VEBOX(dev_priv->dev)) { 137112638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 137212638c57SBen Widawsky notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 137312638c57SBen Widawsky 137412638c57SBen Widawsky if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 137512638c57SBen Widawsky DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 137612638c57SBen Widawsky i915_handle_error(dev_priv->dev, false); 137712638c57SBen Widawsky } 137812638c57SBen Widawsky } 13791403c0d4SPaulo Zanoni } 1380baf02a1fSBen Widawsky 1381ff1f525eSDaniel Vetter static irqreturn_t valleyview_irq_handler(int irq, void *arg) 13827e231dbeSJesse Barnes { 13837e231dbeSJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 13847e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 13857e231dbeSJesse Barnes u32 iir, gt_iir, pm_iir; 13867e231dbeSJesse Barnes irqreturn_t ret = IRQ_NONE; 13877e231dbeSJesse Barnes unsigned long irqflags; 13887e231dbeSJesse Barnes int pipe; 13897e231dbeSJesse Barnes u32 pipe_stats[I915_MAX_PIPES]; 13907e231dbeSJesse Barnes 13917e231dbeSJesse Barnes atomic_inc(&dev_priv->irq_received); 13927e231dbeSJesse Barnes 13937e231dbeSJesse Barnes while (true) { 13947e231dbeSJesse Barnes iir = I915_READ(VLV_IIR); 13957e231dbeSJesse Barnes gt_iir = I915_READ(GTIIR); 13967e231dbeSJesse Barnes pm_iir = I915_READ(GEN6_PMIIR); 13977e231dbeSJesse Barnes 13987e231dbeSJesse Barnes if (gt_iir == 0 && pm_iir == 0 && iir == 0) 13997e231dbeSJesse Barnes goto out; 14007e231dbeSJesse Barnes 14017e231dbeSJesse Barnes ret = IRQ_HANDLED; 14027e231dbeSJesse Barnes 1403e7b4c6b1SDaniel Vetter snb_gt_irq_handler(dev, dev_priv, gt_iir); 14047e231dbeSJesse Barnes 14057e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 14067e231dbeSJesse Barnes for_each_pipe(pipe) { 14077e231dbeSJesse Barnes int reg = PIPESTAT(pipe); 14087e231dbeSJesse Barnes pipe_stats[pipe] = I915_READ(reg); 14097e231dbeSJesse Barnes 14107e231dbeSJesse Barnes /* 14117e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 14127e231dbeSJesse Barnes */ 14137e231dbeSJesse Barnes if (pipe_stats[pipe] & 0x8000ffff) { 14147e231dbeSJesse Barnes if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 14157e231dbeSJesse Barnes DRM_DEBUG_DRIVER("pipe %c underrun\n", 14167e231dbeSJesse Barnes pipe_name(pipe)); 14177e231dbeSJesse Barnes I915_WRITE(reg, pipe_stats[pipe]); 14187e231dbeSJesse Barnes } 14197e231dbeSJesse Barnes } 14207e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 14217e231dbeSJesse Barnes 142231acc7f5SJesse Barnes for_each_pipe(pipe) { 142331acc7f5SJesse Barnes if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 142431acc7f5SJesse Barnes drm_handle_vblank(dev, pipe); 142531acc7f5SJesse Barnes 142631acc7f5SJesse Barnes if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 142731acc7f5SJesse 
Barnes intel_prepare_page_flip(dev, pipe); 142831acc7f5SJesse Barnes intel_finish_page_flip(dev, pipe); 142931acc7f5SJesse Barnes } 14304356d586SDaniel Vetter 14314356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1432277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 143331acc7f5SJesse Barnes } 143431acc7f5SJesse Barnes 14357e231dbeSJesse Barnes /* Consume port. Then clear IIR or we'll miss events */ 14367e231dbeSJesse Barnes if (iir & I915_DISPLAY_PORT_INTERRUPT) { 14377e231dbeSJesse Barnes u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1438b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 14397e231dbeSJesse Barnes 14407e231dbeSJesse Barnes DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 14417e231dbeSJesse Barnes hotplug_status); 144291d131d2SDaniel Vetter 144310a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 144491d131d2SDaniel Vetter 14457e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 14467e231dbeSJesse Barnes I915_READ(PORT_HOTPLUG_STAT); 14477e231dbeSJesse Barnes } 14487e231dbeSJesse Barnes 1449515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1450515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 14517e231dbeSJesse Barnes 145260611c13SPaulo Zanoni if (pm_iir) 1453d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 14547e231dbeSJesse Barnes 14557e231dbeSJesse Barnes I915_WRITE(GTIIR, gt_iir); 14567e231dbeSJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 14577e231dbeSJesse Barnes I915_WRITE(VLV_IIR, iir); 14587e231dbeSJesse Barnes } 14597e231dbeSJesse Barnes 14607e231dbeSJesse Barnes out: 14617e231dbeSJesse Barnes return ret; 14627e231dbeSJesse Barnes } 14637e231dbeSJesse Barnes 146423e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1465776ad806SJesse Barnes { 1466776ad806SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 14679db4a9c7SJesse Barnes int pipe; 1468b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1469776ad806SJesse Barnes 147010a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 147191d131d2SDaniel Vetter 1472cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 1473cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1474776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 1475cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1476cfc33bf7SVille Syrjälä port_name(port)); 1477cfc33bf7SVille Syrjälä } 1478776ad806SJesse Barnes 1479ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 1480ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1481ce99c256SDaniel Vetter 1482776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 1483515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 1484776ad806SJesse Barnes 1485776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 1486776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1487776ad806SJesse Barnes 1488776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 1489776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1490776ad806SJesse Barnes 1491776ad806SJesse Barnes if (pch_iir & SDE_POISON) 1492776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 1493776ad806SJesse Barnes 14949db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 14959db4a9c7SJesse Barnes for_each_pipe(pipe) 14969db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 14979db4a9c7SJesse Barnes 
pipe_name(pipe), 14989db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1499776ad806SJesse Barnes 1500776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1501776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1502776ad806SJesse Barnes 1503776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1504776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1505776ad806SJesse Barnes 1506776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 15078664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 15088664281bSPaulo Zanoni false)) 15098664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 15108664281bSPaulo Zanoni 15118664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 15128664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 15138664281bSPaulo Zanoni false)) 15148664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 15158664281bSPaulo Zanoni } 15168664281bSPaulo Zanoni 15178664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 15188664281bSPaulo Zanoni { 15198664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 15208664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 15215a69b89fSDaniel Vetter enum pipe pipe; 15228664281bSPaulo Zanoni 1523de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1524de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1525de032bf4SPaulo Zanoni 15265a69b89fSDaniel Vetter for_each_pipe(pipe) { 15275a69b89fSDaniel Vetter if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 15285a69b89fSDaniel Vetter if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 15295a69b89fSDaniel Vetter false)) 15305a69b89fSDaniel Vetter DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 15315a69b89fSDaniel Vetter pipe_name(pipe)); 15325a69b89fSDaniel Vetter } 15338664281bSPaulo Zanoni 15345a69b89fSDaniel Vetter if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 15355a69b89fSDaniel Vetter if (IS_IVYBRIDGE(dev)) 1536277de95eSDaniel Vetter ivb_pipe_crc_irq_handler(dev, pipe); 15375a69b89fSDaniel Vetter else 1538277de95eSDaniel Vetter hsw_pipe_crc_irq_handler(dev, pipe); 15395a69b89fSDaniel Vetter } 15405a69b89fSDaniel Vetter } 15418bf1e9f1SShuang He 15428664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 15438664281bSPaulo Zanoni } 15448664281bSPaulo Zanoni 15458664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 15468664281bSPaulo Zanoni { 15478664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 15488664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 15498664281bSPaulo Zanoni 1550de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1551de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1552de032bf4SPaulo Zanoni 15538664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 15548664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 15558664281bSPaulo Zanoni false)) 15568664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 15578664281bSPaulo Zanoni 15588664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 15598664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 15608664281bSPaulo Zanoni false)) 15618664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 15628664281bSPaulo Zanoni 15638664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 
15648664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 15658664281bSPaulo Zanoni false)) 15668664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 15678664281bSPaulo Zanoni 15688664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1569776ad806SJesse Barnes } 1570776ad806SJesse Barnes 157123e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 157223e81d69SAdam Jackson { 157323e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 157423e81d69SAdam Jackson int pipe; 1575b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 157623e81d69SAdam Jackson 157710a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 157891d131d2SDaniel Vetter 1579cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1580cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 158123e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1582cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1583cfc33bf7SVille Syrjälä port_name(port)); 1584cfc33bf7SVille Syrjälä } 158523e81d69SAdam Jackson 158623e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1587ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 158823e81d69SAdam Jackson 158923e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1590515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 159123e81d69SAdam Jackson 159223e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 159323e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 159423e81d69SAdam Jackson 159523e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 159623e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 159723e81d69SAdam Jackson 159823e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 159923e81d69SAdam Jackson for_each_pipe(pipe) 160023e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 160123e81d69SAdam Jackson pipe_name(pipe), 160223e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 16038664281bSPaulo Zanoni 16048664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 16058664281bSPaulo Zanoni cpt_serr_int_handler(dev); 160623e81d69SAdam Jackson } 160723e81d69SAdam Jackson 1608c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1609c008bc6eSPaulo Zanoni { 1610c008bc6eSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 161140da17c2SDaniel Vetter enum pipe pipe; 1612c008bc6eSPaulo Zanoni 1613c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 1614c008bc6eSPaulo Zanoni dp_aux_irq_handler(dev); 1615c008bc6eSPaulo Zanoni 1616c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 1617c008bc6eSPaulo Zanoni intel_opregion_asle_intr(dev); 1618c008bc6eSPaulo Zanoni 1619c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 1620c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1621c008bc6eSPaulo Zanoni 162240da17c2SDaniel Vetter for_each_pipe(pipe) { 162340da17c2SDaniel Vetter if (de_iir & DE_PIPE_VBLANK(pipe)) 162440da17c2SDaniel Vetter drm_handle_vblank(dev, pipe); 1625c008bc6eSPaulo Zanoni 162640da17c2SDaniel Vetter if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 162740da17c2SDaniel Vetter if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 162840da17c2SDaniel Vetter DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 162940da17c2SDaniel Vetter pipe_name(pipe)); 1630c008bc6eSPaulo Zanoni 163140da17c2SDaniel Vetter if (de_iir & DE_PIPE_CRC_DONE(pipe)) 163240da17c2SDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 
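/*
 * The FIFO underrun check above disables further reporting on the first
 * underrun and only then prints the debug message, so a pipe that keeps
 * underrunning does not flood the log.
 */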
16335b3a856bSDaniel Vetter 163440da17c2SDaniel Vetter /* plane/pipes map 1:1 on ilk+ */ 163540da17c2SDaniel Vetter if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 163640da17c2SDaniel Vetter intel_prepare_page_flip(dev, pipe); 163740da17c2SDaniel Vetter intel_finish_page_flip_plane(dev, pipe); 1638c008bc6eSPaulo Zanoni } 1639c008bc6eSPaulo Zanoni } 1640c008bc6eSPaulo Zanoni 1641c008bc6eSPaulo Zanoni /* check event from PCH */ 1642c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 1643c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 1644c008bc6eSPaulo Zanoni 1645c008bc6eSPaulo Zanoni if (HAS_PCH_CPT(dev)) 1646c008bc6eSPaulo Zanoni cpt_irq_handler(dev, pch_iir); 1647c008bc6eSPaulo Zanoni else 1648c008bc6eSPaulo Zanoni ibx_irq_handler(dev, pch_iir); 1649c008bc6eSPaulo Zanoni 1650c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 1651c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 1652c008bc6eSPaulo Zanoni } 1653c008bc6eSPaulo Zanoni 1654c008bc6eSPaulo Zanoni if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1655c008bc6eSPaulo Zanoni ironlake_rps_change_irq_handler(dev); 1656c008bc6eSPaulo Zanoni } 1657c008bc6eSPaulo Zanoni 16589719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 16599719fb98SPaulo Zanoni { 16609719fb98SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 16613b6c42e8SDaniel Vetter enum pipe i; 16629719fb98SPaulo Zanoni 16639719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 16649719fb98SPaulo Zanoni ivb_err_int_handler(dev); 16659719fb98SPaulo Zanoni 16669719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 16679719fb98SPaulo Zanoni dp_aux_irq_handler(dev); 16689719fb98SPaulo Zanoni 16699719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 16709719fb98SPaulo Zanoni intel_opregion_asle_intr(dev); 16719719fb98SPaulo Zanoni 16723b6c42e8SDaniel Vetter for_each_pipe(i) { 167340da17c2SDaniel Vetter if (de_iir & (DE_PIPE_VBLANK_IVB(i))) 16749719fb98SPaulo Zanoni drm_handle_vblank(dev, i); 167540da17c2SDaniel Vetter 167640da17c2SDaniel Vetter /* plane/pipes map 1:1 on ilk+ */ 167740da17c2SDaniel Vetter if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) { 16789719fb98SPaulo Zanoni intel_prepare_page_flip(dev, i); 16799719fb98SPaulo Zanoni intel_finish_page_flip_plane(dev, i); 16809719fb98SPaulo Zanoni } 16819719fb98SPaulo Zanoni } 16829719fb98SPaulo Zanoni 16839719fb98SPaulo Zanoni /* check event from PCH */ 16849719fb98SPaulo Zanoni if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 16859719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 16869719fb98SPaulo Zanoni 16879719fb98SPaulo Zanoni cpt_irq_handler(dev, pch_iir); 16889719fb98SPaulo Zanoni 16899719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 16909719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 16919719fb98SPaulo Zanoni } 16929719fb98SPaulo Zanoni } 16939719fb98SPaulo Zanoni 1694f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1695b1f14ad0SJesse Barnes { 1696b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1697b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1698f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 16990e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 1700b1f14ad0SJesse Barnes 1701b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1702b1f14ad0SJesse Barnes 17038664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 17048664281bSPaulo Zanoni * do any 
I915_{READ,WRITE}. */ 1705907b28c5SChris Wilson intel_uncore_check_errors(dev); 17068664281bSPaulo Zanoni 1707b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1708b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1709b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 171023a78516SPaulo Zanoni POSTING_READ(DEIER); 17110e43406bSChris Wilson 171244498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 171344498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 171444498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 171544498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 171644498aeaSPaulo Zanoni * due to its back queue). */ 1717ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 171844498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 171944498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 172044498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1721ab5c608bSBen Widawsky } 172244498aeaSPaulo Zanoni 17230e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 17240e43406bSChris Wilson if (gt_iir) { 1725d8fc8a47SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) 17260e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 1727d8fc8a47SPaulo Zanoni else 1728d8fc8a47SPaulo Zanoni ilk_gt_irq_handler(dev, dev_priv, gt_iir); 17290e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 17300e43406bSChris Wilson ret = IRQ_HANDLED; 17310e43406bSChris Wilson } 1732b1f14ad0SJesse Barnes 1733b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 17340e43406bSChris Wilson if (de_iir) { 1735f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) 17369719fb98SPaulo Zanoni ivb_display_irq_handler(dev, de_iir); 1737f1af8fc1SPaulo Zanoni else 1738f1af8fc1SPaulo Zanoni ilk_display_irq_handler(dev, de_iir); 17390e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 17400e43406bSChris Wilson ret = IRQ_HANDLED; 17410e43406bSChris Wilson } 17420e43406bSChris Wilson 1743f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) { 1744f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 17450e43406bSChris Wilson if (pm_iir) { 1746d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 1747b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 17480e43406bSChris Wilson ret = IRQ_HANDLED; 17490e43406bSChris Wilson } 1750f1af8fc1SPaulo Zanoni } 1751b1f14ad0SJesse Barnes 1752b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1753b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1754ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 175544498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 175644498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1757ab5c608bSBen Widawsky } 1758b1f14ad0SJesse Barnes 1759b1f14ad0SJesse Barnes return ret; 1760b1f14ad0SJesse Barnes } 1761b1f14ad0SJesse Barnes 1762abd58f01SBen Widawsky static irqreturn_t gen8_irq_handler(int irq, void *arg) 1763abd58f01SBen Widawsky { 1764abd58f01SBen Widawsky struct drm_device *dev = arg; 1765abd58f01SBen Widawsky struct drm_i915_private *dev_priv = dev->dev_private; 1766abd58f01SBen Widawsky u32 master_ctl; 1767abd58f01SBen Widawsky irqreturn_t ret = IRQ_NONE; 1768abd58f01SBen Widawsky uint32_t tmp = 0; 1769c42664ccSDaniel Vetter enum pipe pipe; 1770abd58f01SBen Widawsky 1771abd58f01SBen Widawsky atomic_inc(&dev_priv->irq_received); 1772abd58f01SBen Widawsky 1773abd58f01SBen Widawsky master_ctl = I915_READ(GEN8_MASTER_IRQ); 1774abd58f01SBen Widawsky master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 1775abd58f01SBen Widawsky if (!master_ctl)
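/* no GT/DE/PCH source bits are set once the master enable bit is masked off, so the interrupt is not ours */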
1776abd58f01SBen Widawsky return IRQ_NONE; 1777abd58f01SBen Widawsky 1778abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, 0); 1779abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 1780abd58f01SBen Widawsky 1781abd58f01SBen Widawsky ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1782abd58f01SBen Widawsky 1783abd58f01SBen Widawsky if (master_ctl & GEN8_DE_MISC_IRQ) { 1784abd58f01SBen Widawsky tmp = I915_READ(GEN8_DE_MISC_IIR); 1785abd58f01SBen Widawsky if (tmp & GEN8_DE_MISC_GSE) 1786abd58f01SBen Widawsky intel_opregion_asle_intr(dev); 1787abd58f01SBen Widawsky else if (tmp) 1788abd58f01SBen Widawsky DRM_ERROR("Unexpected DE Misc interrupt\n"); 1789abd58f01SBen Widawsky else 1790abd58f01SBen Widawsky DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 1791abd58f01SBen Widawsky 1792abd58f01SBen Widawsky if (tmp) { 1793abd58f01SBen Widawsky I915_WRITE(GEN8_DE_MISC_IIR, tmp); 1794abd58f01SBen Widawsky ret = IRQ_HANDLED; 1795abd58f01SBen Widawsky } 1796abd58f01SBen Widawsky } 1797abd58f01SBen Widawsky 17986d766f02SDaniel Vetter if (master_ctl & GEN8_DE_PORT_IRQ) { 17996d766f02SDaniel Vetter tmp = I915_READ(GEN8_DE_PORT_IIR); 18006d766f02SDaniel Vetter if (tmp & GEN8_AUX_CHANNEL_A) 18016d766f02SDaniel Vetter dp_aux_irq_handler(dev); 18026d766f02SDaniel Vetter else if (tmp) 18036d766f02SDaniel Vetter DRM_ERROR("Unexpected DE Port interrupt\n"); 18046d766f02SDaniel Vetter else 18056d766f02SDaniel Vetter DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 18066d766f02SDaniel Vetter 18076d766f02SDaniel Vetter if (tmp) { 18086d766f02SDaniel Vetter I915_WRITE(GEN8_DE_PORT_IIR, tmp); 18096d766f02SDaniel Vetter ret = IRQ_HANDLED; 18106d766f02SDaniel Vetter } 18116d766f02SDaniel Vetter } 18126d766f02SDaniel Vetter 1813abd58f01SBen Widawsky for_each_pipe(pipe) { 1814abd58f01SBen Widawsky uint32_t pipe_iir; 1815abd58f01SBen Widawsky 1816c42664ccSDaniel Vetter if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 1817c42664ccSDaniel Vetter continue; 1818c42664ccSDaniel Vetter 1819abd58f01SBen Widawsky pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 1820abd58f01SBen Widawsky if (pipe_iir & GEN8_PIPE_VBLANK) 1821abd58f01SBen Widawsky drm_handle_vblank(dev, pipe); 1822abd58f01SBen Widawsky 1823abd58f01SBen Widawsky if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 1824abd58f01SBen Widawsky intel_prepare_page_flip(dev, pipe); 1825abd58f01SBen Widawsky intel_finish_page_flip_plane(dev, pipe); 1826abd58f01SBen Widawsky } 1827abd58f01SBen Widawsky 18280fbe7870SDaniel Vetter if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 18290fbe7870SDaniel Vetter hsw_pipe_crc_irq_handler(dev, pipe); 18300fbe7870SDaniel Vetter 183138d83c96SDaniel Vetter if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 183238d83c96SDaniel Vetter if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 183338d83c96SDaniel Vetter false)) 183438d83c96SDaniel Vetter DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 183538d83c96SDaniel Vetter pipe_name(pipe)); 183638d83c96SDaniel Vetter } 183738d83c96SDaniel Vetter 183830100f2bSDaniel Vetter if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 183930100f2bSDaniel Vetter DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 184030100f2bSDaniel Vetter pipe_name(pipe), 184130100f2bSDaniel Vetter pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 184230100f2bSDaniel Vetter } 1843abd58f01SBen Widawsky 1844abd58f01SBen Widawsky if (pipe_iir) { 1845abd58f01SBen Widawsky ret = IRQ_HANDLED; 1846abd58f01SBen Widawsky I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 1847c42664ccSDaniel Vetter } else 1848abd58f01SBen Widawsky DRM_ERROR("The master control
interrupt lied (DE PIPE)!\n"); 1849abd58f01SBen Widawsky } 1850abd58f01SBen Widawsky 185192d03a80SDaniel Vetter if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 185292d03a80SDaniel Vetter /* 185392d03a80SDaniel Vetter * FIXME(BDW): Assume for now that the new interrupt handling 185492d03a80SDaniel Vetter * scheme also closed the SDE interrupt handling race we've seen 185592d03a80SDaniel Vetter * on older pch-split platforms. But this needs testing. 185692d03a80SDaniel Vetter */ 185792d03a80SDaniel Vetter u32 pch_iir = I915_READ(SDEIIR); 185892d03a80SDaniel Vetter 185992d03a80SDaniel Vetter cpt_irq_handler(dev, pch_iir); 186092d03a80SDaniel Vetter 186192d03a80SDaniel Vetter if (pch_iir) { 186292d03a80SDaniel Vetter I915_WRITE(SDEIIR, pch_iir); 186392d03a80SDaniel Vetter ret = IRQ_HANDLED; 186492d03a80SDaniel Vetter } 186592d03a80SDaniel Vetter } 186692d03a80SDaniel Vetter 1867abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1868abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 1869abd58f01SBen Widawsky 1870abd58f01SBen Widawsky return ret; 1871abd58f01SBen Widawsky } 1872abd58f01SBen Widawsky 187317e1df07SDaniel Vetter static void i915_error_wake_up(struct drm_i915_private *dev_priv, 187417e1df07SDaniel Vetter bool reset_completed) 187517e1df07SDaniel Vetter { 187617e1df07SDaniel Vetter struct intel_ring_buffer *ring; 187717e1df07SDaniel Vetter int i; 187817e1df07SDaniel Vetter 187917e1df07SDaniel Vetter /* 188017e1df07SDaniel Vetter * Notify all waiters for GPU completion events that reset state has 188117e1df07SDaniel Vetter * been changed, and that they need to restart their wait after 188217e1df07SDaniel Vetter * checking for potential errors (and bail out to drop locks if there is 188317e1df07SDaniel Vetter * a gpu reset pending so that i915_error_work_func can acquire them). 188417e1df07SDaniel Vetter */ 188517e1df07SDaniel Vetter 188617e1df07SDaniel Vetter /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 188717e1df07SDaniel Vetter for_each_ring(ring, dev_priv, i) 188817e1df07SDaniel Vetter wake_up_all(&ring->irq_queue); 188917e1df07SDaniel Vetter 189017e1df07SDaniel Vetter /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 189117e1df07SDaniel Vetter wake_up_all(&dev_priv->pending_flip_queue); 189217e1df07SDaniel Vetter 189317e1df07SDaniel Vetter /* 189417e1df07SDaniel Vetter * Signal tasks blocked in i915_gem_wait_for_error that the pending 189517e1df07SDaniel Vetter * reset state is cleared. 189617e1df07SDaniel Vetter */ 189717e1df07SDaniel Vetter if (reset_completed) 189817e1df07SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 189917e1df07SDaniel Vetter } 190017e1df07SDaniel Vetter 19018a905236SJesse Barnes /** 19028a905236SJesse Barnes * i915_error_work_func - do process context error handling work 19038a905236SJesse Barnes * @work: work struct 19048a905236SJesse Barnes * 19058a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 19068a905236SJesse Barnes * was detected. 
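 *
 * Three uevents are involved: I915_ERROR_UEVENT=1 as soon as the hang is
 * reported, I915_RESET_UEVENT=1 right before the actual GPU reset, and
 * I915_ERROR_UEVENT=0 once the reset has completed successfully.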
19078a905236SJesse Barnes */ 19088a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 19098a905236SJesse Barnes { 19101f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 19111f83fee0SDaniel Vetter work); 19121f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 19131f83fee0SDaniel Vetter gpu_error); 19148a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1915cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1916cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1917cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 191817e1df07SDaniel Vetter int ret; 19198a905236SJesse Barnes 1920f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 19218a905236SJesse Barnes 19227db0ba24SDaniel Vetter /* 19237db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 19247db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 19257db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 19267db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 19277db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 19287db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 19297db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 19307db0ba24SDaniel Vetter * work we don't need to worry about any other races. 19317db0ba24SDaniel Vetter */ 19327db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 193344d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 19347db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 19357db0ba24SDaniel Vetter reset_event); 19361f83fee0SDaniel Vetter 193717e1df07SDaniel Vetter /* 193817e1df07SDaniel Vetter * All state reset _must_ be completed before we update the 193917e1df07SDaniel Vetter * reset counter, for otherwise waiters might miss the reset 194017e1df07SDaniel Vetter * pending state and not properly drop locks, resulting in 194117e1df07SDaniel Vetter * deadlocks with the reset work. 194217e1df07SDaniel Vetter */ 1943f69061beSDaniel Vetter ret = i915_reset(dev); 1944f69061beSDaniel Vetter 194517e1df07SDaniel Vetter intel_display_handle_reset(dev); 194617e1df07SDaniel Vetter 1947f69061beSDaniel Vetter if (ret == 0) { 1948f69061beSDaniel Vetter /* 1949f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1950f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1951f69061beSDaniel Vetter * complete. 1952f69061beSDaniel Vetter * 1953f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1954f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1955f69061beSDaniel Vetter * updates before 1956f69061beSDaniel Vetter * the counter increment. 
1957f69061beSDaniel Vetter */ 1958f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1959f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1960f69061beSDaniel Vetter 1961f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1962f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 19631f83fee0SDaniel Vetter } else { 19641f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1965f316a42cSBen Gamari } 19661f83fee0SDaniel Vetter 196717e1df07SDaniel Vetter /* 196817e1df07SDaniel Vetter * Note: The wake_up also serves as a memory barrier so that 196917e1df07SDaniel Vetter * waiters see the update value of the reset counter atomic_t. 197017e1df07SDaniel Vetter */ 197117e1df07SDaniel Vetter i915_error_wake_up(dev_priv, true); 1972f316a42cSBen Gamari } 19738a905236SJesse Barnes } 19748a905236SJesse Barnes 197535aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1976c0e09200SDave Airlie { 19778a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 1978bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 197963eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 1980050ee91fSBen Widawsky int pipe, i; 198163eeaf38SJesse Barnes 198235aed2e6SChris Wilson if (!eir) 198335aed2e6SChris Wilson return; 198463eeaf38SJesse Barnes 1985a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 19868a905236SJesse Barnes 1987bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 1988bd9854f9SBen Widawsky 19898a905236SJesse Barnes if (IS_G4X(dev)) { 19908a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 19918a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 19928a905236SJesse Barnes 1993a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1994a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1995050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1996050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1997a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1998a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 19998a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 20003143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 20018a905236SJesse Barnes } 20028a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 20038a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 2004a70491ccSJoe Perches pr_err("page table error\n"); 2005a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 20068a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 20073143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 20088a905236SJesse Barnes } 20098a905236SJesse Barnes } 20108a905236SJesse Barnes 2011a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 201263eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 201363eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 2014a70491ccSJoe Perches pr_err("page table error\n"); 2015a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 201663eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 20173143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 201863eeaf38SJesse Barnes } 20198a905236SJesse Barnes } 20208a905236SJesse Barnes 202163eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 2022a70491ccSJoe Perches pr_err("memory refresh error:\n"); 20239db4a9c7SJesse Barnes for_each_pipe(pipe) 2024a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 20259db4a9c7SJesse Barnes pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 
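/*
 * With the pr_fmt prefix above, a memory refresh error ends up in the log
 * roughly as follows (register values purely illustrative):
 *   i915: memory refresh error:
 *   i915: pipe A stat: 0x80000203
 *   i915: pipe B stat: 0x00000000
 */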
202663eeaf38SJesse Barnes /* pipestat has already been acked */ 202763eeaf38SJesse Barnes } 202863eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 2029a70491ccSJoe Perches pr_err("instruction error\n"); 2030a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2031050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 2032050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2033a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 203463eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 203563eeaf38SJesse Barnes 2036a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2037a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2038a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 203963eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 20403143a2bfSChris Wilson POSTING_READ(IPEIR); 204163eeaf38SJesse Barnes } else { 204263eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 204363eeaf38SJesse Barnes 2044a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2045a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2046a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2047a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 204863eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 20493143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 205063eeaf38SJesse Barnes } 205163eeaf38SJesse Barnes } 205263eeaf38SJesse Barnes 205363eeaf38SJesse Barnes I915_WRITE(EIR, eir); 20543143a2bfSChris Wilson POSTING_READ(EIR); 205563eeaf38SJesse Barnes eir = I915_READ(EIR); 205663eeaf38SJesse Barnes if (eir) { 205763eeaf38SJesse Barnes /* 205863eeaf38SJesse Barnes * some errors might have become stuck, 205963eeaf38SJesse Barnes * mask them. 206063eeaf38SJesse Barnes */ 206163eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 206263eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 206363eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 206463eeaf38SJesse Barnes } 206535aed2e6SChris Wilson } 206635aed2e6SChris Wilson 206735aed2e6SChris Wilson /** 206835aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 206935aed2e6SChris Wilson * @dev: drm device 207035aed2e6SChris Wilson * 207135aed2e6SChris Wilson * Do some basic checking of register state at error interrupt time and 207235aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 207335aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 207435aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 207535aed2e6SChris Wilson * of a ring dump etc.).
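 *
 * When @wedged is true the reset-in-progress flag is also set in the reset
 * counter and every GPU waiter is woken up before the error work is
 * scheduled, so that nobody keeps holding a lock the reset handler needs.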
207635aed2e6SChris Wilson */ 2077527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 207835aed2e6SChris Wilson { 207935aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 208035aed2e6SChris Wilson 208135aed2e6SChris Wilson i915_capture_error_state(dev); 208235aed2e6SChris Wilson i915_report_and_clear_eir(dev); 20838a905236SJesse Barnes 2084ba1234d1SBen Gamari if (wedged) { 2085f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2086f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 2087ba1234d1SBen Gamari 208811ed50ecSBen Gamari /* 208917e1df07SDaniel Vetter * Wakeup waiting processes so that the reset work function 209017e1df07SDaniel Vetter * i915_error_work_func doesn't deadlock trying to grab various 209117e1df07SDaniel Vetter * locks. By bumping the reset counter first, the woken 209217e1df07SDaniel Vetter * processes will see a reset in progress and back off, 209317e1df07SDaniel Vetter * releasing their locks and then wait for the reset completion. 209417e1df07SDaniel Vetter * We must do this for _all_ gpu waiters that might hold locks 209517e1df07SDaniel Vetter * that the reset work needs to acquire. 209617e1df07SDaniel Vetter * 209717e1df07SDaniel Vetter * Note: The wake_up serves as the required memory barrier to 209817e1df07SDaniel Vetter * ensure that the waiters see the updated value of the reset 209917e1df07SDaniel Vetter * counter atomic_t. 210011ed50ecSBen Gamari */ 210117e1df07SDaniel Vetter i915_error_wake_up(dev_priv, false); 210211ed50ecSBen Gamari } 210311ed50ecSBen Gamari 2104122f46baSDaniel Vetter /* 2105122f46baSDaniel Vetter * Our reset work can grab modeset locks (since it needs to reset the 2106122f46baSDaniel Vetter * state of outstanding pageflips). Hence it must not be run on our own 2107122f46baSDaniel Vetter * dev_priv->wq work queue for otherwise the flush_work in the pageflip 2108122f46baSDaniel Vetter * code will deadlock. 2109122f46baSDaniel Vetter */ 2110122f46baSDaniel Vetter schedule_work(&dev_priv->gpu_error.work); 21118a905236SJesse Barnes } 21128a905236SJesse Barnes 211321ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 21144e5359cdSSimon Farnsworth { 21154e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 21164e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 21174e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 211805394f39SChris Wilson struct drm_i915_gem_object *obj; 21194e5359cdSSimon Farnsworth struct intel_unpin_work *work; 21204e5359cdSSimon Farnsworth unsigned long flags; 21214e5359cdSSimon Farnsworth bool stall_detected; 21224e5359cdSSimon Farnsworth 21234e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 21244e5359cdSSimon Farnsworth if (intel_crtc == NULL) 21254e5359cdSSimon Farnsworth return; 21264e5359cdSSimon Farnsworth 21274e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 21284e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 21294e5359cdSSimon Farnsworth 2130e7d841caSChris Wilson if (work == NULL || 2131e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2132e7d841caSChris Wilson !work->enable_stall_check) { 21334e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 21344e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 21354e5359cdSSimon Farnsworth return; 21364e5359cdSSimon Farnsworth } 21374e5359cdSSimon Farnsworth 21384e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 213905394f39SChris Wilson obj = work->pending_flip_obj; 2140a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 21419db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 2142446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2143f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 21444e5359cdSSimon Farnsworth } else { 21459db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 2146f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 214701f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 21484e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 21494e5359cdSSimon Farnsworth } 21504e5359cdSSimon Farnsworth 21514e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 21524e5359cdSSimon Farnsworth 21534e5359cdSSimon Farnsworth if (stall_detected) { 21544e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 21554e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 21564e5359cdSSimon Farnsworth } 21574e5359cdSSimon Farnsworth } 21584e5359cdSSimon Farnsworth 215942f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 216042f52ef8SKeith Packard * we use as a pipe index 216142f52ef8SKeith Packard */ 2162f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 21630a3e67a4SJesse Barnes { 21640a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2165e9d21d7fSKeith Packard unsigned long irqflags; 216671e0ffa5SJesse Barnes 21675eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 216871e0ffa5SJesse Barnes return -EINVAL; 21690a3e67a4SJesse Barnes 21701ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2171f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 21727c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 21737c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 21740a3e67a4SJesse Barnes else 21757c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 21767c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 21778692d00eSChris Wilson 21788692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 21798692d00eSChris Wilson if (dev_priv->info->gen == 3) 21806b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 21811ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 21828692d00eSChris Wilson 21830a3e67a4SJesse Barnes return 0; 21840a3e67a4SJesse Barnes } 21850a3e67a4SJesse Barnes 2186f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2187f796cf8fSJesse Barnes { 2188f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2189f796cf8fSJesse Barnes unsigned long irqflags; 2190b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 219140da17c2SDaniel Vetter DE_PIPE_VBLANK(pipe); 2192f796cf8fSJesse Barnes 2193f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 2194f796cf8fSJesse Barnes return -EINVAL; 2195f796cf8fSJesse Barnes 2196f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2197b518421fSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 2198b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2199b1f14ad0SJesse Barnes 2200b1f14ad0SJesse Barnes return 0; 2201b1f14ad0SJesse Barnes } 2202b1f14ad0SJesse Barnes 22037e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 22047e231dbeSJesse Barnes { 22057e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22067e231dbeSJesse Barnes unsigned long irqflags; 220731acc7f5SJesse Barnes u32 imr; 22087e231dbeSJesse Barnes 22097e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 22107e231dbeSJesse Barnes return -EINVAL; 22117e231dbeSJesse Barnes 22127e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22137e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 22143b6c42e8SDaniel Vetter if (pipe == PIPE_A) 22157e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 221631acc7f5SJesse Barnes else 22177e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22187e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 221931acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 222031acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 22217e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22227e231dbeSJesse Barnes 22237e231dbeSJesse Barnes return 0; 22247e231dbeSJesse Barnes } 22257e231dbeSJesse Barnes 2226abd58f01SBen Widawsky static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2227abd58f01SBen Widawsky { 2228abd58f01SBen Widawsky struct drm_i915_private *dev_priv = dev->dev_private; 2229abd58f01SBen Widawsky unsigned long irqflags; 2230abd58f01SBen Widawsky 2231abd58f01SBen Widawsky if (!i915_pipe_enabled(dev, pipe)) 2232abd58f01SBen Widawsky return -EINVAL; 2233abd58f01SBen Widawsky 2234abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22357167d7c6SDaniel Vetter dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 22367167d7c6SDaniel Vetter I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2237abd58f01SBen Widawsky POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2238abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2239abd58f01SBen Widawsky return 0; 2240abd58f01SBen Widawsky } 2241abd58f01SBen Widawsky 224242f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 224342f52ef8SKeith Packard * we use as a pipe index 224442f52ef8SKeith Packard */ 2245f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 22460a3e67a4SJesse Barnes { 22470a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2248e9d21d7fSKeith Packard unsigned long irqflags; 22490a3e67a4SJesse Barnes 22501ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22518692d00eSChris Wilson if (dev_priv->info->gen == 3) 22526b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 22538692d00eSChris Wilson 22547c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 22557c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 22567c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 22571ec14ad3SChris 
Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22580a3e67a4SJesse Barnes } 22590a3e67a4SJesse Barnes 2260f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2261f796cf8fSJesse Barnes { 2262f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2263f796cf8fSJesse Barnes unsigned long irqflags; 2264b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 226540da17c2SDaniel Vetter DE_PIPE_VBLANK(pipe); 2266f796cf8fSJesse Barnes 2267f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2268b518421fSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 2269b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2270b1f14ad0SJesse Barnes } 2271b1f14ad0SJesse Barnes 22727e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 22737e231dbeSJesse Barnes { 22747e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22757e231dbeSJesse Barnes unsigned long irqflags; 227631acc7f5SJesse Barnes u32 imr; 22777e231dbeSJesse Barnes 22787e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 227931acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 228031acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 22817e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 22823b6c42e8SDaniel Vetter if (pipe == PIPE_A) 22837e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 228431acc7f5SJesse Barnes else 22857e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22867e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 22877e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22887e231dbeSJesse Barnes } 22897e231dbeSJesse Barnes 2290abd58f01SBen Widawsky static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2291abd58f01SBen Widawsky { 2292abd58f01SBen Widawsky struct drm_i915_private *dev_priv = dev->dev_private; 2293abd58f01SBen Widawsky unsigned long irqflags; 2294abd58f01SBen Widawsky 2295abd58f01SBen Widawsky if (!i915_pipe_enabled(dev, pipe)) 2296abd58f01SBen Widawsky return; 2297abd58f01SBen Widawsky 2298abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22997167d7c6SDaniel Vetter dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 23007167d7c6SDaniel Vetter I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2301abd58f01SBen Widawsky POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2302abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2303abd58f01SBen Widawsky } 2304abd58f01SBen Widawsky 2305893eead0SChris Wilson static u32 2306893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 2307852835f3SZou Nan hai { 2308893eead0SChris Wilson return list_entry(ring->request_list.prev, 2309893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 2310893eead0SChris Wilson } 2311893eead0SChris Wilson 23129107e9d2SChris Wilson static bool 23139107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2314893eead0SChris Wilson { 23159107e9d2SChris Wilson return (list_empty(&ring->request_list) || 23169107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 2317f65d9421SBen Gamari } 2318f65d9421SBen Gamari 23196274f212SChris Wilson static struct intel_ring_buffer * 23206274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2321a24a11e6SChris Wilson { 2322a24a11e6SChris Wilson 
struct drm_i915_private *dev_priv = ring->dev->dev_private; 23236274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 2324a24a11e6SChris Wilson 2325a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2326a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 2327a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 23286274f212SChris Wilson return NULL; 2329a24a11e6SChris Wilson 2330a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 2331a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 2332a24a11e6SChris Wilson */ 23336274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2334a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 2335a24a11e6SChris Wilson do { 2336a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 2337a24a11e6SChris Wilson if (cmd == ipehr) 2338a24a11e6SChris Wilson break; 2339a24a11e6SChris Wilson 2340a24a11e6SChris Wilson acthd -= 4; 2341a24a11e6SChris Wilson if (acthd < acthd_min) 23426274f212SChris Wilson return NULL; 2343a24a11e6SChris Wilson } while (1); 2344a24a11e6SChris Wilson 23456274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 23466274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2347a24a11e6SChris Wilson } 2348a24a11e6SChris Wilson 23496274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 23506274f212SChris Wilson { 23516274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 23526274f212SChris Wilson struct intel_ring_buffer *signaller; 23536274f212SChris Wilson u32 seqno, ctl; 23546274f212SChris Wilson 23556274f212SChris Wilson ring->hangcheck.deadlock = true; 23566274f212SChris Wilson 23576274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 23586274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 23596274f212SChris Wilson return -1; 23606274f212SChris Wilson 23616274f212SChris Wilson /* cursory check for an unkickable deadlock */ 23626274f212SChris Wilson ctl = I915_READ_CTL(signaller); 23636274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 23646274f212SChris Wilson return -1; 23656274f212SChris Wilson 23666274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 23676274f212SChris Wilson } 23686274f212SChris Wilson 23696274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 23706274f212SChris Wilson { 23716274f212SChris Wilson struct intel_ring_buffer *ring; 23726274f212SChris Wilson int i; 23736274f212SChris Wilson 23746274f212SChris Wilson for_each_ring(ring, dev_priv, i) 23756274f212SChris Wilson ring->hangcheck.deadlock = false; 23766274f212SChris Wilson } 23776274f212SChris Wilson 2378ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 2379ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 23801ec14ad3SChris Wilson { 23811ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 23821ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 23839107e9d2SChris Wilson u32 tmp; 23849107e9d2SChris Wilson 23856274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 2386f2f4d82fSJani Nikula return HANGCHECK_ACTIVE; 23876274f212SChris Wilson 23889107e9d2SChris Wilson if (IS_GEN2(dev)) 2389f2f4d82fSJani Nikula return HANGCHECK_HUNG; 23909107e9d2SChris Wilson 23919107e9d2SChris Wilson /* Is the 
chip hanging on a WAIT_FOR_EVENT? 23929107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 23939107e9d2SChris Wilson * and break the hang. This should work on 23949107e9d2SChris Wilson * all but the second generation chipsets. 23959107e9d2SChris Wilson */ 23969107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 23971ec14ad3SChris Wilson if (tmp & RING_WAIT) { 23981ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 23991ec14ad3SChris Wilson ring->name); 240009e14bf3SChris Wilson i915_handle_error(dev, false); 24011ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 2402f2f4d82fSJani Nikula return HANGCHECK_KICK; 24031ec14ad3SChris Wilson } 2404a24a11e6SChris Wilson 24056274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 24066274f212SChris Wilson switch (semaphore_passed(ring)) { 24076274f212SChris Wilson default: 2408f2f4d82fSJani Nikula return HANGCHECK_HUNG; 24096274f212SChris Wilson case 1: 2410a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 2411a24a11e6SChris Wilson ring->name); 241209e14bf3SChris Wilson i915_handle_error(dev, false); 2413a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 2414f2f4d82fSJani Nikula return HANGCHECK_KICK; 24156274f212SChris Wilson case 0: 2416f2f4d82fSJani Nikula return HANGCHECK_WAIT; 24176274f212SChris Wilson } 24189107e9d2SChris Wilson } 24199107e9d2SChris Wilson 2420f2f4d82fSJani Nikula return HANGCHECK_HUNG; 2421a24a11e6SChris Wilson } 2422d1e61e7fSChris Wilson 2423f65d9421SBen Gamari /** 2424f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 242505407ff8SMika Kuoppala * batchbuffers in a long time. We keep track per ring seqno progress and 242605407ff8SMika Kuoppala * if there are no progress, hangcheck score for that ring is increased. 242705407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. On stuck case 242805407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 242905407ff8SMika Kuoppala * we assume chip is wedged and try to fix it by resetting the chip. 
2430f65d9421SBen Gamari */ 2431a658b5d2SDamien Lespiau static void i915_hangcheck_elapsed(unsigned long data) 2432f65d9421SBen Gamari { 2433f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 2434f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 2435b4519513SChris Wilson struct intel_ring_buffer *ring; 2436b4519513SChris Wilson int i; 243705407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 24389107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 24399107e9d2SChris Wilson #define BUSY 1 24409107e9d2SChris Wilson #define KICK 5 24419107e9d2SChris Wilson #define HUNG 20 24429107e9d2SChris Wilson #define FIRE 30 2443893eead0SChris Wilson 24443e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 24453e0dc6b0SBen Widawsky return; 24463e0dc6b0SBen Widawsky 2447b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 244805407ff8SMika Kuoppala u32 seqno, acthd; 24499107e9d2SChris Wilson bool busy = true; 2450b4519513SChris Wilson 24516274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 24526274f212SChris Wilson 245305407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 245405407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 245505407ff8SMika Kuoppala 245605407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 24579107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 2458da661464SMika Kuoppala ring->hangcheck.action = HANGCHECK_IDLE; 2459da661464SMika Kuoppala 24609107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 24619107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. */ 2462094f9a54SChris Wilson if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2463f4adcd24SDaniel Vetter if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 24649107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 24659107e9d2SChris Wilson ring->name); 2466f4adcd24SDaniel Vetter else 2467f4adcd24SDaniel Vetter DRM_INFO("Fake missed irq on %s\n", 2468f4adcd24SDaniel Vetter ring->name); 24699107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 2470094f9a54SChris Wilson } 2471094f9a54SChris Wilson /* Safeguard against driver failure */ 2472094f9a54SChris Wilson ring->hangcheck.score += BUSY; 24739107e9d2SChris Wilson } else 24749107e9d2SChris Wilson busy = false; 247505407ff8SMika Kuoppala } else { 24766274f212SChris Wilson /* We always increment the hangcheck score 24776274f212SChris Wilson * if the ring is busy and still processing 24786274f212SChris Wilson * the same request, so that no single request 24796274f212SChris Wilson * can run indefinitely (such as a chain of 24806274f212SChris Wilson * batches). The only time we do not increment 24816274f212SChris Wilson * the hangcheck score on this ring, if this 24826274f212SChris Wilson * ring is in a legitimate wait for another 24836274f212SChris Wilson * ring. In that case the waiting ring is a 24846274f212SChris Wilson * victim and we want to be sure we catch the 24856274f212SChris Wilson * right culprit. Then every time we do kick 24866274f212SChris Wilson * the ring, add a small increment to the 24876274f212SChris Wilson * score so that we can catch a batch that is 24886274f212SChris Wilson * being repeatedly kicked and so responsible 24896274f212SChris Wilson * for stalling the machine. 
24909107e9d2SChris Wilson */ 2491ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 2492ad8beaeaSMika Kuoppala acthd); 2493ad8beaeaSMika Kuoppala 2494ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 2495da661464SMika Kuoppala case HANGCHECK_IDLE: 2496f2f4d82fSJani Nikula case HANGCHECK_WAIT: 24976274f212SChris Wilson break; 2498f2f4d82fSJani Nikula case HANGCHECK_ACTIVE: 2499ea04cb31SJani Nikula ring->hangcheck.score += BUSY; 25006274f212SChris Wilson break; 2501f2f4d82fSJani Nikula case HANGCHECK_KICK: 2502ea04cb31SJani Nikula ring->hangcheck.score += KICK; 25036274f212SChris Wilson break; 2504f2f4d82fSJani Nikula case HANGCHECK_HUNG: 2505ea04cb31SJani Nikula ring->hangcheck.score += HUNG; 25066274f212SChris Wilson stuck[i] = true; 25076274f212SChris Wilson break; 25086274f212SChris Wilson } 250905407ff8SMika Kuoppala } 25109107e9d2SChris Wilson } else { 2511da661464SMika Kuoppala ring->hangcheck.action = HANGCHECK_ACTIVE; 2512da661464SMika Kuoppala 25139107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 25149107e9d2SChris Wilson * attempts across multiple batches. 25159107e9d2SChris Wilson */ 25169107e9d2SChris Wilson if (ring->hangcheck.score > 0) 25179107e9d2SChris Wilson ring->hangcheck.score--; 2518cbb465e7SChris Wilson } 2519f65d9421SBen Gamari 252005407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 252105407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 25229107e9d2SChris Wilson busy_count += busy; 252305407ff8SMika Kuoppala } 252405407ff8SMika Kuoppala 252505407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 25269107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 2527b8d88d1dSDaniel Vetter DRM_INFO("%s on %s\n", 252805407ff8SMika Kuoppala stuck[i] ? "stuck" : "no progress", 2529a43adf07SChris Wilson ring->name); 2530a43adf07SChris Wilson rings_hung++; 253105407ff8SMika Kuoppala } 253205407ff8SMika Kuoppala } 253305407ff8SMika Kuoppala 253405407ff8SMika Kuoppala if (rings_hung) 253505407ff8SMika Kuoppala return i915_handle_error(dev, true); 253605407ff8SMika Kuoppala 253705407ff8SMika Kuoppala if (busy_count) 253805407ff8SMika Kuoppala /* Reset timer case chip hangs without another request 253905407ff8SMika Kuoppala * being added */ 254010cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 254110cd45b6SMika Kuoppala } 254210cd45b6SMika Kuoppala 254310cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev) 254410cd45b6SMika Kuoppala { 254510cd45b6SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 254610cd45b6SMika Kuoppala if (!i915_enable_hangcheck) 254710cd45b6SMika Kuoppala return; 254810cd45b6SMika Kuoppala 254999584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 255010cd45b6SMika Kuoppala round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2551f65d9421SBen Gamari } 2552f65d9421SBen Gamari 255391738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 255491738a95SPaulo Zanoni { 255591738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 255691738a95SPaulo Zanoni 255791738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 255891738a95SPaulo Zanoni return; 255991738a95SPaulo Zanoni 256091738a95SPaulo Zanoni /* south display irq */ 256191738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 256291738a95SPaulo Zanoni /* 256391738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 256491738a95SPaulo Zanoni * PCH interrupts. 
Hence we can't update it after the interrupt handler 256591738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 256691738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR. 256791738a95SPaulo Zanoni */ 256891738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 256991738a95SPaulo Zanoni POSTING_READ(SDEIER); 257091738a95SPaulo Zanoni } 257191738a95SPaulo Zanoni 2572d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev) 2573d18ea1b5SDaniel Vetter { 2574d18ea1b5SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 2575d18ea1b5SDaniel Vetter 2576d18ea1b5SDaniel Vetter /* and GT */ 2577d18ea1b5SDaniel Vetter I915_WRITE(GTIMR, 0xffffffff); 2578d18ea1b5SDaniel Vetter I915_WRITE(GTIER, 0x0); 2579d18ea1b5SDaniel Vetter POSTING_READ(GTIER); 2580d18ea1b5SDaniel Vetter 2581d18ea1b5SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 2582d18ea1b5SDaniel Vetter /* and PM */ 2583d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 2584d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIER, 0x0); 2585d18ea1b5SDaniel Vetter POSTING_READ(GEN6_PMIER); 2586d18ea1b5SDaniel Vetter } 2587d18ea1b5SDaniel Vetter } 2588d18ea1b5SDaniel Vetter 2589c0e09200SDave Airlie /* drm_dma.h hooks 2590c0e09200SDave Airlie */ 2591f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2592036a4a7dSZhenyu Wang { 2593036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2594036a4a7dSZhenyu Wang 25954697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 25964697995bSJesse Barnes 2597036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2598bdfcdb63SDaniel Vetter 2599036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2600036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 26013143a2bfSChris Wilson POSTING_READ(DEIER); 2602036a4a7dSZhenyu Wang 2603d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 2604c650156aSZhenyu Wang 260591738a95SPaulo Zanoni ibx_irq_preinstall(dev); 26067d99163dSBen Widawsky } 26077d99163dSBen Widawsky 26087e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 26097e231dbeSJesse Barnes { 26107e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26117e231dbeSJesse Barnes int pipe; 26127e231dbeSJesse Barnes 26137e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 26147e231dbeSJesse Barnes 26157e231dbeSJesse Barnes /* VLV magic */ 26167e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 26177e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 26187e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 26197e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 26207e231dbeSJesse Barnes 26217e231dbeSJesse Barnes /* and GT */ 26227e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 26237e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2624d18ea1b5SDaniel Vetter 2625d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 26267e231dbeSJesse Barnes 26277e231dbeSJesse Barnes I915_WRITE(DPINVGTT, 0xff); 26287e231dbeSJesse Barnes 26297e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 26307e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 26317e231dbeSJesse Barnes for_each_pipe(pipe) 26327e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 26337e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26347e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 26357e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 
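/*
 * Editor's sketch (not part of the original driver): every preinstall hook
 * in this file follows the same quiesce pattern -- mask everything in IMR,
 * disable everything in IER, ack whatever is already latched in IIR, then
 * issue a posting read so the writes actually reach the hardware before the
 * handler is installed.  The helper below is illustrative only; the
 * example_* name is invented and readl()/writel() stand in for the driver's
 * I915_READ()/I915_WRITE() wrappers.
 */
#if 0
static void example_irq_bank_quiesce(void __iomem *imr, void __iomem *ier,
				     void __iomem *iir)
{
	writel(0xffffffff, imr);	/* mask every interrupt source */
	writel(0, ier);			/* keep sources from reaching the CPU */
	writel(readl(iir), iir);	/* IIR bits are write-1-to-clear */
	readl(imr);			/* posting read flushes the above */
}
#endif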
26367e231dbeSJesse Barnes POSTING_READ(VLV_IER); 26377e231dbeSJesse Barnes } 26387e231dbeSJesse Barnes 2639abd58f01SBen Widawsky static void gen8_irq_preinstall(struct drm_device *dev) 2640abd58f01SBen Widawsky { 2641abd58f01SBen Widawsky struct drm_i915_private *dev_priv = dev->dev_private; 2642abd58f01SBen Widawsky int pipe; 2643abd58f01SBen Widawsky 2644abd58f01SBen Widawsky atomic_set(&dev_priv->irq_received, 0); 2645abd58f01SBen Widawsky 2646abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, 0); 2647abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 2648abd58f01SBen Widawsky 2649abd58f01SBen Widawsky /* IIR can theoretically queue up two events. Be paranoid */ 2650abd58f01SBen Widawsky #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2651abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2652abd58f01SBen Widawsky POSTING_READ(GEN8_##type##_IMR(which)); \ 2653abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IER(which), 0); \ 2654abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2655abd58f01SBen Widawsky POSTING_READ(GEN8_##type##_IIR(which)); \ 2656abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2657abd58f01SBen Widawsky } while (0) 2658abd58f01SBen Widawsky 2659abd58f01SBen Widawsky #define GEN8_IRQ_INIT(type) do { \ 2660abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2661abd58f01SBen Widawsky POSTING_READ(GEN8_##type##_IMR); \ 2662abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IER, 0); \ 2663abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2664abd58f01SBen Widawsky POSTING_READ(GEN8_##type##_IIR); \ 2665abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2666abd58f01SBen Widawsky } while (0) 2667abd58f01SBen Widawsky 2668abd58f01SBen Widawsky GEN8_IRQ_INIT_NDX(GT, 0); 2669abd58f01SBen Widawsky GEN8_IRQ_INIT_NDX(GT, 1); 2670abd58f01SBen Widawsky GEN8_IRQ_INIT_NDX(GT, 2); 2671abd58f01SBen Widawsky GEN8_IRQ_INIT_NDX(GT, 3); 2672abd58f01SBen Widawsky 2673abd58f01SBen Widawsky for_each_pipe(pipe) { 2674abd58f01SBen Widawsky GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2675abd58f01SBen Widawsky } 2676abd58f01SBen Widawsky 2677abd58f01SBen Widawsky GEN8_IRQ_INIT(DE_PORT); 2678abd58f01SBen Widawsky GEN8_IRQ_INIT(DE_MISC); 2679abd58f01SBen Widawsky GEN8_IRQ_INIT(PCU); 2680abd58f01SBen Widawsky #undef GEN8_IRQ_INIT 2681abd58f01SBen Widawsky #undef GEN8_IRQ_INIT_NDX 2682abd58f01SBen Widawsky 2683abd58f01SBen Widawsky POSTING_READ(GEN8_PCU_IIR); 2684abd58f01SBen Widawsky } 2685abd58f01SBen Widawsky 268682a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 268782a28bcfSDaniel Vetter { 268882a28bcfSDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 268982a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 269082a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2691fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 269282a28bcfSDaniel Vetter 269382a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2694fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 269582a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2696cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2697fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 269882a28bcfSDaniel Vetter } else { 2699fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 270082a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, 
&mode_config->encoder_list, base.head) 2701cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2702fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 270382a28bcfSDaniel Vetter } 270482a28bcfSDaniel Vetter 2705fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 270682a28bcfSDaniel Vetter 27077fe0b973SKeith Packard /* 27087fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 27097fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 27107fe0b973SKeith Packard * 27117fe0b973SKeith Packard * This register is the same on all known PCH chips. 27127fe0b973SKeith Packard */ 27137fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 27147fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 27157fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 27167fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 27177fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 27187fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 27197fe0b973SKeith Packard } 27207fe0b973SKeith Packard 2721d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2722d46da437SPaulo Zanoni { 2723d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 272482a28bcfSDaniel Vetter u32 mask; 2725d46da437SPaulo Zanoni 2726692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2727692a04cfSDaniel Vetter return; 2728692a04cfSDaniel Vetter 27298664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 27308664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2731de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 27328664281bSPaulo Zanoni } else { 27338664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 27348664281bSPaulo Zanoni 27358664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 27368664281bSPaulo Zanoni } 2737ab5c608bSBen Widawsky 2738d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2739d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2740d46da437SPaulo Zanoni } 2741d46da437SPaulo Zanoni 27420a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 27430a9a8c91SDaniel Vetter { 27440a9a8c91SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 27450a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 27460a9a8c91SDaniel Vetter 27470a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 27480a9a8c91SDaniel Vetter 27490a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 2750040d2baaSBen Widawsky if (HAS_L3_DPF(dev)) { 27510a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. 
*/ 275235a85ac6SBen Widawsky dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 275335a85ac6SBen Widawsky gt_irqs |= GT_PARITY_ERROR(dev); 27540a9a8c91SDaniel Vetter } 27550a9a8c91SDaniel Vetter 27560a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 27570a9a8c91SDaniel Vetter if (IS_GEN5(dev)) { 27580a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 27590a9a8c91SDaniel Vetter ILK_BSD_USER_INTERRUPT; 27600a9a8c91SDaniel Vetter } else { 27610a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 27620a9a8c91SDaniel Vetter } 27630a9a8c91SDaniel Vetter 27640a9a8c91SDaniel Vetter I915_WRITE(GTIIR, I915_READ(GTIIR)); 27650a9a8c91SDaniel Vetter I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 27660a9a8c91SDaniel Vetter I915_WRITE(GTIER, gt_irqs); 27670a9a8c91SDaniel Vetter POSTING_READ(GTIER); 27680a9a8c91SDaniel Vetter 27690a9a8c91SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 27700a9a8c91SDaniel Vetter pm_irqs |= GEN6_PM_RPS_EVENTS; 27710a9a8c91SDaniel Vetter 27720a9a8c91SDaniel Vetter if (HAS_VEBOX(dev)) 27730a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 27740a9a8c91SDaniel Vetter 2775605cd25bSPaulo Zanoni dev_priv->pm_irq_mask = 0xffffffff; 27760a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2777605cd25bSPaulo Zanoni I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 27780a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIER, pm_irqs); 27790a9a8c91SDaniel Vetter POSTING_READ(GEN6_PMIER); 27800a9a8c91SDaniel Vetter } 27810a9a8c91SDaniel Vetter } 27820a9a8c91SDaniel Vetter 2783f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2784036a4a7dSZhenyu Wang { 27854bc9d430SDaniel Vetter unsigned long irqflags; 2786036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 27878e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 27888e76f8dcSPaulo Zanoni 27898e76f8dcSPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) { 27908e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 27918e76f8dcSPaulo Zanoni DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 27928e76f8dcSPaulo Zanoni DE_PLANEB_FLIP_DONE_IVB | 27938e76f8dcSPaulo Zanoni DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 27948e76f8dcSPaulo Zanoni DE_ERR_INT_IVB); 27958e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 27968e76f8dcSPaulo Zanoni DE_PIPEA_VBLANK_IVB); 27978e76f8dcSPaulo Zanoni 27988e76f8dcSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 27998e76f8dcSPaulo Zanoni } else { 28008e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2801ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 28025b3a856bSDaniel Vetter DE_AUX_CHANNEL_A | 28035b3a856bSDaniel Vetter DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 28045b3a856bSDaniel Vetter DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 28055b3a856bSDaniel Vetter DE_POISON); 28068e76f8dcSPaulo Zanoni extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 28078e76f8dcSPaulo Zanoni } 2808036a4a7dSZhenyu Wang 28091ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2810036a4a7dSZhenyu Wang 2811036a4a7dSZhenyu Wang /* should always can generate irq */ 2812036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 28131ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 28148e76f8dcSPaulo Zanoni I915_WRITE(DEIER, display_mask | extra_mask); 28153143a2bfSChris Wilson POSTING_READ(DEIER); 2816036a4a7dSZhenyu Wang 28170a9a8c91SDaniel Vetter 
gen5_gt_irq_postinstall(dev); 2818036a4a7dSZhenyu Wang 2819d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 28207fe0b973SKeith Packard 2821f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 28226005ce42SDaniel Vetter /* Enable PCU event interrupts 28236005ce42SDaniel Vetter * 28246005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 28254bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 28264bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 28274bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2828f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 28294bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2830f97108d1SJesse Barnes } 2831f97108d1SJesse Barnes 2832036a4a7dSZhenyu Wang return 0; 2833036a4a7dSZhenyu Wang } 2834036a4a7dSZhenyu Wang 28357e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 28367e231dbeSJesse Barnes { 28377e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 28387e231dbeSJesse Barnes u32 enable_mask; 2839379ef82dSDaniel Vetter u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2840379ef82dSDaniel Vetter PIPE_CRC_DONE_ENABLE; 2841b79480baSDaniel Vetter unsigned long irqflags; 28427e231dbeSJesse Barnes 28437e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 284431acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 284531acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 284631acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 28477e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 28487e231dbeSJesse Barnes 284931acc7f5SJesse Barnes /* 285031acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 285131acc7f5SJesse Barnes * toggle them based on usage. 285231acc7f5SJesse Barnes */ 285331acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 285431acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 285531acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 28567e231dbeSJesse Barnes 285720afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 285820afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 285920afbda2SDaniel Vetter 28607e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 28617e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 28627e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28637e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 28647e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 28657e231dbeSJesse Barnes POSTING_READ(VLV_IER); 28667e231dbeSJesse Barnes 2867b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2868b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 2869b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 28703b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 28713b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 28723b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2873b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 287431acc7f5SJesse Barnes 28757e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28767e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28777e231dbeSJesse Barnes 28780a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 28797e231dbeSJesse Barnes 28807e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 28817e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 28827e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 28837e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 28847e231dbeSJesse Barnes #endif 28857e231dbeSJesse Barnes 28867e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 288720afbda2SDaniel Vetter 288820afbda2SDaniel Vetter return 0; 288920afbda2SDaniel Vetter } 289020afbda2SDaniel Vetter 2891abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2892abd58f01SBen Widawsky { 2893abd58f01SBen Widawsky int i; 2894abd58f01SBen Widawsky 2895abd58f01SBen Widawsky /* These are interrupts we'll toggle with the ring mask register */ 2896abd58f01SBen Widawsky uint32_t gt_interrupts[] = { 2897abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2898abd58f01SBen Widawsky GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2899abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2900abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2901abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2902abd58f01SBen Widawsky 0, 2903abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2904abd58f01SBen Widawsky }; 2905abd58f01SBen Widawsky 2906abd58f01SBen Widawsky for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2907abd58f01SBen Widawsky u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2908abd58f01SBen Widawsky if (tmp) 2909abd58f01SBen Widawsky DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2910abd58f01SBen Widawsky i, tmp); 2911abd58f01SBen Widawsky I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2912abd58f01SBen Widawsky I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2913abd58f01SBen Widawsky } 2914abd58f01SBen Widawsky POSTING_READ(GEN8_GT_IER(0)); 2915abd58f01SBen Widawsky } 2916abd58f01SBen Widawsky 2917abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2918abd58f01SBen Widawsky { 2919abd58f01SBen Widawsky struct drm_device *dev = dev_priv->dev; 2920*13b3a0a7SDaniel Vetter uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 29210fbe7870SDaniel Vetter GEN8_PIPE_CDCLK_CRC_DONE | 292238d83c96SDaniel Vetter GEN8_PIPE_FIFO_UNDERRUN | 292330100f2bSDaniel Vetter GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2924*13b3a0a7SDaniel Vetter uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; 2925abd58f01SBen Widawsky int pipe; 2926*13b3a0a7SDaniel Vetter dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 2927*13b3a0a7SDaniel Vetter dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 2928*13b3a0a7SDaniel Vetter dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 2929abd58f01SBen Widawsky 2930abd58f01SBen Widawsky for_each_pipe(pipe) { 
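/*
 * Editor's sketch (illustrative only): gen8 keeps a shadow copy of each
 * pipe's IMR in dev_priv->de_irq_mask[] so that gen8_enable_vblank() and
 * gen8_disable_vblank() earlier in this file only have to flip one bit and
 * write the cached value back.  The example_* helper below merely shows
 * that idiom with made-up names; it is not the driver's API.
 */
#if 0
static void example_toggle_shadowed_imr(u32 *shadow, void __iomem *imr,
					u32 bit, bool unmask)
{
	if (unmask)
		*shadow &= ~bit;	/* 0 in IMR means "not masked" */
	else
		*shadow |= bit;
	writel(*shadow, imr);
	readl(imr);			/* posting read, as the real code does */
}
#endif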
2931abd58f01SBen Widawsky u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2932abd58f01SBen Widawsky if (tmp) 2933abd58f01SBen Widawsky DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2934abd58f01SBen Widawsky pipe, tmp); 2935abd58f01SBen Widawsky I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2936abd58f01SBen Widawsky I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 2937abd58f01SBen Widawsky } 2938abd58f01SBen Widawsky POSTING_READ(GEN8_DE_PIPE_ISR(0)); 2939abd58f01SBen Widawsky 29406d766f02SDaniel Vetter I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 29416d766f02SDaniel Vetter I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 2942abd58f01SBen Widawsky POSTING_READ(GEN8_DE_PORT_IER); 2943abd58f01SBen Widawsky } 2944abd58f01SBen Widawsky 2945abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev) 2946abd58f01SBen Widawsky { 2947abd58f01SBen Widawsky struct drm_i915_private *dev_priv = dev->dev_private; 2948abd58f01SBen Widawsky 2949abd58f01SBen Widawsky gen8_gt_irq_postinstall(dev_priv); 2950abd58f01SBen Widawsky gen8_de_irq_postinstall(dev_priv); 2951abd58f01SBen Widawsky 2952abd58f01SBen Widawsky ibx_irq_postinstall(dev); 2953abd58f01SBen Widawsky 2954abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2955abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 2956abd58f01SBen Widawsky 2957abd58f01SBen Widawsky return 0; 2958abd58f01SBen Widawsky } 2959abd58f01SBen Widawsky 2960abd58f01SBen Widawsky static void gen8_irq_uninstall(struct drm_device *dev) 2961abd58f01SBen Widawsky { 2962abd58f01SBen Widawsky struct drm_i915_private *dev_priv = dev->dev_private; 2963abd58f01SBen Widawsky int pipe; 2964abd58f01SBen Widawsky 2965abd58f01SBen Widawsky if (!dev_priv) 2966abd58f01SBen Widawsky return; 2967abd58f01SBen Widawsky 2968abd58f01SBen Widawsky atomic_set(&dev_priv->irq_received, 0); 2969abd58f01SBen Widawsky 2970abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, 0); 2971abd58f01SBen Widawsky 2972abd58f01SBen Widawsky #define GEN8_IRQ_FINI_NDX(type, which) do { \ 2973abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2974abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IER(which), 0); \ 2975abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2976abd58f01SBen Widawsky } while (0) 2977abd58f01SBen Widawsky 2978abd58f01SBen Widawsky #define GEN8_IRQ_FINI(type) do { \ 2979abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2980abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IER, 0); \ 2981abd58f01SBen Widawsky I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2982abd58f01SBen Widawsky } while (0) 2983abd58f01SBen Widawsky 2984abd58f01SBen Widawsky GEN8_IRQ_FINI_NDX(GT, 0); 2985abd58f01SBen Widawsky GEN8_IRQ_FINI_NDX(GT, 1); 2986abd58f01SBen Widawsky GEN8_IRQ_FINI_NDX(GT, 2); 2987abd58f01SBen Widawsky GEN8_IRQ_FINI_NDX(GT, 3); 2988abd58f01SBen Widawsky 2989abd58f01SBen Widawsky for_each_pipe(pipe) { 2990abd58f01SBen Widawsky GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 2991abd58f01SBen Widawsky } 2992abd58f01SBen Widawsky 2993abd58f01SBen Widawsky GEN8_IRQ_FINI(DE_PORT); 2994abd58f01SBen Widawsky GEN8_IRQ_FINI(DE_MISC); 2995abd58f01SBen Widawsky GEN8_IRQ_FINI(PCU); 2996abd58f01SBen Widawsky #undef GEN8_IRQ_FINI 2997abd58f01SBen Widawsky #undef GEN8_IRQ_FINI_NDX 2998abd58f01SBen Widawsky 2999abd58f01SBen Widawsky POSTING_READ(GEN8_PCU_IIR); 3000abd58f01SBen Widawsky } 3001abd58f01SBen Widawsky 30027e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct 
drm_device *dev) 30037e231dbeSJesse Barnes { 30047e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 30057e231dbeSJesse Barnes int pipe; 30067e231dbeSJesse Barnes 30077e231dbeSJesse Barnes if (!dev_priv) 30087e231dbeSJesse Barnes return; 30097e231dbeSJesse Barnes 3010ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3011ac4c16c5SEgbert Eich 30127e231dbeSJesse Barnes for_each_pipe(pipe) 30137e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 30147e231dbeSJesse Barnes 30157e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 30167e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 30177e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 30187e231dbeSJesse Barnes for_each_pipe(pipe) 30197e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 30207e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 30217e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 30227e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 30237e231dbeSJesse Barnes POSTING_READ(VLV_IER); 30247e231dbeSJesse Barnes } 30257e231dbeSJesse Barnes 3026f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 3027036a4a7dSZhenyu Wang { 3028036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 30294697995bSJesse Barnes 30304697995bSJesse Barnes if (!dev_priv) 30314697995bSJesse Barnes return; 30324697995bSJesse Barnes 3033ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3034ac4c16c5SEgbert Eich 3035036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 3036036a4a7dSZhenyu Wang 3037036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 3038036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 3039036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 30408664281bSPaulo Zanoni if (IS_GEN7(dev)) 30418664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3042036a4a7dSZhenyu Wang 3043036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 3044036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 3045036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 3046192aac1fSKeith Packard 3047ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 3048ab5c608bSBen Widawsky return; 3049ab5c608bSBen Widawsky 3050192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 3051192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 3052192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 30538664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 30548664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3055036a4a7dSZhenyu Wang } 3056036a4a7dSZhenyu Wang 3057c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 3058c2798b19SChris Wilson { 3059c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3060c2798b19SChris Wilson int pipe; 3061c2798b19SChris Wilson 3062c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3063c2798b19SChris Wilson 3064c2798b19SChris Wilson for_each_pipe(pipe) 3065c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3066c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 3067c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 3068c2798b19SChris Wilson POSTING_READ16(IER); 3069c2798b19SChris Wilson } 3070c2798b19SChris Wilson 3071c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 3072c2798b19SChris Wilson { 3073c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3074379ef82dSDaniel Vetter unsigned long irqflags; 
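/*
 * Editor's sketch (illustrative only): the postinstall hooks in the rest of
 * this file all derive the two register values from one "wanted" set of
 * sources -- IMR gets the complement (1 == masked) and IER gets the set
 * itself (1 == may assert the interrupt line), followed by a posting read.
 * The names below are invented for the example, and writew()/readw() stand
 * in for I915_WRITE16()/I915_READ16().
 */
#if 0
static void example_program_imr_ier(void __iomem *imr, void __iomem *ier,
				    u16 wanted)
{
	writew(~wanted, imr);	/* unmask exactly the wanted sources */
	writew(wanted, ier);	/* and let only those generate interrupts */
	readw(ier);		/* flush posted writes */
}
#endif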
3075c2798b19SChris Wilson 3076c2798b19SChris Wilson I915_WRITE16(EMR, 3077c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3078c2798b19SChris Wilson 3079c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 3080c2798b19SChris Wilson dev_priv->irq_mask = 3081c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3082c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3083c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3084c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3085c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3086c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 3087c2798b19SChris Wilson 3088c2798b19SChris Wilson I915_WRITE16(IER, 3089c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3090c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3091c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3092c2798b19SChris Wilson I915_USER_INTERRUPT); 3093c2798b19SChris Wilson POSTING_READ16(IER); 3094c2798b19SChris Wilson 3095379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3096379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 3097379ef82dSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 30983b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 30993b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3100379ef82dSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3101379ef82dSDaniel Vetter 3102c2798b19SChris Wilson return 0; 3103c2798b19SChris Wilson } 3104c2798b19SChris Wilson 310590a72f87SVille Syrjälä /* 310690a72f87SVille Syrjälä * Returns true when a page flip has completed. 310790a72f87SVille Syrjälä */ 310890a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 310990a72f87SVille Syrjälä int pipe, u16 iir) 311090a72f87SVille Syrjälä { 311190a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 311290a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 311390a72f87SVille Syrjälä 311490a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 311590a72f87SVille Syrjälä return false; 311690a72f87SVille Syrjälä 311790a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 311890a72f87SVille Syrjälä return false; 311990a72f87SVille Syrjälä 312090a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 312190a72f87SVille Syrjälä 312290a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 312390a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 312490a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 312590a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 312690a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 
312790a72f87SVille Syrjälä */ 312890a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 312990a72f87SVille Syrjälä return false; 313090a72f87SVille Syrjälä 313190a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 313290a72f87SVille Syrjälä 313390a72f87SVille Syrjälä return true; 313490a72f87SVille Syrjälä } 313590a72f87SVille Syrjälä 3136ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3137c2798b19SChris Wilson { 3138c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3139c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3140c2798b19SChris Wilson u16 iir, new_iir; 3141c2798b19SChris Wilson u32 pipe_stats[2]; 3142c2798b19SChris Wilson unsigned long irqflags; 3143c2798b19SChris Wilson int pipe; 3144c2798b19SChris Wilson u16 flip_mask = 3145c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3146c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3147c2798b19SChris Wilson 3148c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 3149c2798b19SChris Wilson 3150c2798b19SChris Wilson iir = I915_READ16(IIR); 3151c2798b19SChris Wilson if (iir == 0) 3152c2798b19SChris Wilson return IRQ_NONE; 3153c2798b19SChris Wilson 3154c2798b19SChris Wilson while (iir & ~flip_mask) { 3155c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3156c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 3157c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 3158c2798b19SChris Wilson * interrupts (for non-MSI). 3159c2798b19SChris Wilson */ 3160c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3161c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3162c2798b19SChris Wilson i915_handle_error(dev, false); 3163c2798b19SChris Wilson 3164c2798b19SChris Wilson for_each_pipe(pipe) { 3165c2798b19SChris Wilson int reg = PIPESTAT(pipe); 3166c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3167c2798b19SChris Wilson 3168c2798b19SChris Wilson /* 3169c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 3170c2798b19SChris Wilson */ 3171c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3172c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3173c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3174c2798b19SChris Wilson pipe_name(pipe)); 3175c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 3176c2798b19SChris Wilson } 3177c2798b19SChris Wilson } 3178c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3179c2798b19SChris Wilson 3180c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 3181c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 3182c2798b19SChris Wilson 3183d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 3184c2798b19SChris Wilson 3185c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 3186c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3187c2798b19SChris Wilson 31884356d586SDaniel Vetter for_each_pipe(pipe) { 31894356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 31904356d586SDaniel Vetter i8xx_handle_vblank(dev, pipe, iir)) 31914356d586SDaniel Vetter flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3192c2798b19SChris Wilson 31934356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3194277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 
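/*
 * Editor's sketch (illustrative only): the comment block further down in
 * i915_irq_handler() explains why IIR has to be drained until it reads back
 * zero -- with MSI a new edge is only generated on a 0 -> nonzero
 * transition.  The loop below shows that ack-and-reread shape in isolation,
 * ignoring the flip_mask special case handled by the real code; the
 * example_* name is made up.
 */
#if 0
static void example_drain_iir(void __iomem *iir_reg)
{
	u16 iir = readw(iir_reg);

	while (iir) {
		writew(iir, iir_reg);	/* write-1-to-clear ack of what we saw */
		/* ... handle the sources recorded in this snapshot ... */
		iir = readw(iir_reg);	/* pick up anything that arrived since */
	}
}
#endif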
31954356d586SDaniel Vetter } 3196c2798b19SChris Wilson 3197c2798b19SChris Wilson iir = new_iir; 3198c2798b19SChris Wilson } 3199c2798b19SChris Wilson 3200c2798b19SChris Wilson return IRQ_HANDLED; 3201c2798b19SChris Wilson } 3202c2798b19SChris Wilson 3203c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 3204c2798b19SChris Wilson { 3205c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3206c2798b19SChris Wilson int pipe; 3207c2798b19SChris Wilson 3208c2798b19SChris Wilson for_each_pipe(pipe) { 3209c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 3210c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3211c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3212c2798b19SChris Wilson } 3213c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 3214c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 3215c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 3216c2798b19SChris Wilson } 3217c2798b19SChris Wilson 3218a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 3219a266c7d5SChris Wilson { 3220a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3221a266c7d5SChris Wilson int pipe; 3222a266c7d5SChris Wilson 3223a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3224a266c7d5SChris Wilson 3225a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3226a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3227a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3228a266c7d5SChris Wilson } 3229a266c7d5SChris Wilson 323000d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xeffe); 3231a266c7d5SChris Wilson for_each_pipe(pipe) 3232a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3233a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3234a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3235a266c7d5SChris Wilson POSTING_READ(IER); 3236a266c7d5SChris Wilson } 3237a266c7d5SChris Wilson 3238a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 3239a266c7d5SChris Wilson { 3240a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 324138bde180SChris Wilson u32 enable_mask; 3242379ef82dSDaniel Vetter unsigned long irqflags; 3243a266c7d5SChris Wilson 324438bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 324538bde180SChris Wilson 324638bde180SChris Wilson /* Unmask the interrupts that we always want on. 
*/ 324738bde180SChris Wilson dev_priv->irq_mask = 324838bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 324938bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 325038bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 325138bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 325238bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 325338bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 325438bde180SChris Wilson 325538bde180SChris Wilson enable_mask = 325638bde180SChris Wilson I915_ASLE_INTERRUPT | 325738bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 325838bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 325938bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 326038bde180SChris Wilson I915_USER_INTERRUPT; 326138bde180SChris Wilson 3262a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 326320afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 326420afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 326520afbda2SDaniel Vetter 3266a266c7d5SChris Wilson /* Enable in IER... */ 3267a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3268a266c7d5SChris Wilson /* and unmask in IMR */ 3269a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3270a266c7d5SChris Wilson } 3271a266c7d5SChris Wilson 3272a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 3273a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 3274a266c7d5SChris Wilson POSTING_READ(IER); 3275a266c7d5SChris Wilson 3276f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 327720afbda2SDaniel Vetter 3278379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3279379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 3280379ef82dSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 32813b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 32823b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3283379ef82dSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3284379ef82dSDaniel Vetter 328520afbda2SDaniel Vetter return 0; 328620afbda2SDaniel Vetter } 328720afbda2SDaniel Vetter 328890a72f87SVille Syrjälä /* 328990a72f87SVille Syrjälä * Returns true when a page flip has completed. 329090a72f87SVille Syrjälä */ 329190a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 329290a72f87SVille Syrjälä int plane, int pipe, u32 iir) 329390a72f87SVille Syrjälä { 329490a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 329590a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 329690a72f87SVille Syrjälä 329790a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 329890a72f87SVille Syrjälä return false; 329990a72f87SVille Syrjälä 330090a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 330190a72f87SVille Syrjälä return false; 330290a72f87SVille Syrjälä 330390a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 330490a72f87SVille Syrjälä 330590a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 330690a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 330790a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 330890a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 330990a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 
331090a72f87SVille Syrjälä */ 331190a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 331290a72f87SVille Syrjälä return false; 331390a72f87SVille Syrjälä 331490a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 331590a72f87SVille Syrjälä 331690a72f87SVille Syrjälä return true; 331790a72f87SVille Syrjälä } 331890a72f87SVille Syrjälä 3319ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 3320a266c7d5SChris Wilson { 3321a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3322a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 33238291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3324a266c7d5SChris Wilson unsigned long irqflags; 332538bde180SChris Wilson u32 flip_mask = 332638bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 332738bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 332838bde180SChris Wilson int pipe, ret = IRQ_NONE; 3329a266c7d5SChris Wilson 3330a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 3331a266c7d5SChris Wilson 3332a266c7d5SChris Wilson iir = I915_READ(IIR); 333338bde180SChris Wilson do { 333438bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 33358291ee90SChris Wilson bool blc_event = false; 3336a266c7d5SChris Wilson 3337a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3338a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 3339a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 3340a266c7d5SChris Wilson * interrupts (for non-MSI). 3341a266c7d5SChris Wilson */ 3342a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3343a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3344a266c7d5SChris Wilson i915_handle_error(dev, false); 3345a266c7d5SChris Wilson 3346a266c7d5SChris Wilson for_each_pipe(pipe) { 3347a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 3348a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3349a266c7d5SChris Wilson 335038bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 3351a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3352a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3353a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3354a266c7d5SChris Wilson pipe_name(pipe)); 3355a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 335638bde180SChris Wilson irq_received = true; 3357a266c7d5SChris Wilson } 3358a266c7d5SChris Wilson } 3359a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3360a266c7d5SChris Wilson 3361a266c7d5SChris Wilson if (!irq_received) 3362a266c7d5SChris Wilson break; 3363a266c7d5SChris Wilson 3364a266c7d5SChris Wilson /* Consume port. 
Then clear IIR or we'll miss events */ 3365a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 3366a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3367a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3368b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3369a266c7d5SChris Wilson 3370a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3371a266c7d5SChris Wilson hotplug_status); 337291d131d2SDaniel Vetter 337310a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 337491d131d2SDaniel Vetter 3375a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 337638bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 3377a266c7d5SChris Wilson } 3378a266c7d5SChris Wilson 337938bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 3380a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3381a266c7d5SChris Wilson 3382a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3383a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3384a266c7d5SChris Wilson 3385a266c7d5SChris Wilson for_each_pipe(pipe) { 338638bde180SChris Wilson int plane = pipe; 338738bde180SChris Wilson if (IS_MOBILE(dev)) 338838bde180SChris Wilson plane = !plane; 33895e2032d4SVille Syrjälä 339090a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 339190a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 339290a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3393a266c7d5SChris Wilson 3394a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3395a266c7d5SChris Wilson blc_event = true; 33964356d586SDaniel Vetter 33974356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3398277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 3399a266c7d5SChris Wilson } 3400a266c7d5SChris Wilson 3401a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3402a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3403a266c7d5SChris Wilson 3404a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3405a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3406a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3407a266c7d5SChris Wilson * we would never get another interrupt. 3408a266c7d5SChris Wilson * 3409a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3410a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3411a266c7d5SChris Wilson * another one. 3412a266c7d5SChris Wilson * 3413a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3414a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3415a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3416a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3417a266c7d5SChris Wilson * stray interrupts. 
3418a266c7d5SChris Wilson */ 341938bde180SChris Wilson ret = IRQ_HANDLED; 3420a266c7d5SChris Wilson iir = new_iir; 342138bde180SChris Wilson } while (iir & ~flip_mask); 3422a266c7d5SChris Wilson 3423d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 34248291ee90SChris Wilson 3425a266c7d5SChris Wilson return ret; 3426a266c7d5SChris Wilson } 3427a266c7d5SChris Wilson 3428a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 3429a266c7d5SChris Wilson { 3430a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3431a266c7d5SChris Wilson int pipe; 3432a266c7d5SChris Wilson 3433ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3434ac4c16c5SEgbert Eich 3435a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3436a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3437a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3438a266c7d5SChris Wilson } 3439a266c7d5SChris Wilson 344000d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 344155b39755SChris Wilson for_each_pipe(pipe) { 344255b39755SChris Wilson /* Clear enable bits; then clear status bits */ 3443a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 344455b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 344555b39755SChris Wilson } 3446a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3447a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3448a266c7d5SChris Wilson 3449a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3450a266c7d5SChris Wilson } 3451a266c7d5SChris Wilson 3452a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 3453a266c7d5SChris Wilson { 3454a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3455a266c7d5SChris Wilson int pipe; 3456a266c7d5SChris Wilson 3457a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3458a266c7d5SChris Wilson 3459a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3460a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3461a266c7d5SChris Wilson 3462a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 3463a266c7d5SChris Wilson for_each_pipe(pipe) 3464a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3465a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3466a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3467a266c7d5SChris Wilson POSTING_READ(IER); 3468a266c7d5SChris Wilson } 3469a266c7d5SChris Wilson 3470a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 3471a266c7d5SChris Wilson { 3472a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3473bbba0a97SChris Wilson u32 enable_mask; 3474a266c7d5SChris Wilson u32 error_mask; 3475b79480baSDaniel Vetter unsigned long irqflags; 3476a266c7d5SChris Wilson 3477a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. 
*/ 3478bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3479adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 3480bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3481bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3482bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3483bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3484bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3485bbba0a97SChris Wilson 3486bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 348721ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 348821ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3489bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 3490bbba0a97SChris Wilson 3491bbba0a97SChris Wilson if (IS_G4X(dev)) 3492bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 3493a266c7d5SChris Wilson 3494b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3495b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. */ 3496b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34973b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 34983b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 34993b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3500b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3501a266c7d5SChris Wilson 3502a266c7d5SChris Wilson /* 3503a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 3504a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 3505a266c7d5SChris Wilson */ 3506a266c7d5SChris Wilson if (IS_G4X(dev)) { 3507a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 3508a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 3509a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 3510a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 3511a266c7d5SChris Wilson } else { 3512a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 3513a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 3514a266c7d5SChris Wilson } 3515a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 3516a266c7d5SChris Wilson 3517a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 3518a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 3519a266c7d5SChris Wilson POSTING_READ(IER); 3520a266c7d5SChris Wilson 352120afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 352220afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 352320afbda2SDaniel Vetter 3524f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 352520afbda2SDaniel Vetter 352620afbda2SDaniel Vetter return 0; 352720afbda2SDaniel Vetter } 352820afbda2SDaniel Vetter 3529bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 353020afbda2SDaniel Vetter { 353120afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3532e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3533cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 353420afbda2SDaniel Vetter u32 hotplug_en; 353520afbda2SDaniel Vetter 3536b5ea2d56SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 3537b5ea2d56SDaniel Vetter 3538bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 3539bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3540bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3541adca4730SChris Wilson 
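Editorial aside: the two postinstall paths above (i915_irq_postinstall and i965_irq_postinstall) derive IMR and IER from the same value, but deliberately keep the plane-flip-pending bits out of the IER enable mask while leaving them unmasked in IMR, so the handler can see them in IIR/ISR without them raising interrupts of their own. The user-space sketch below models that split with plain bitmasks; fake_regs, raise_event and the *_BIT names are invented for illustration, and the IMR/IIR/IER semantics are intentionally simplified rather than an exact description of the hardware.

#include <stdint.h>
#include <stdio.h>

#define ASLE_BIT   (1u << 0)
#define PIPE_A_BIT (1u << 1)
#define PIPE_B_BIT (1u << 2)
#define FLIP_A_BIT (1u << 3)
#define USER_BIT   (1u << 4)

struct fake_regs {
	uint32_t imr;	/* 1 = source masked, never latched into IIR (simplified) */
	uint32_t ier;	/* 1 = latched source may assert the CPU interrupt */
	uint32_t iir;	/* latched, unmasked sources */
};

/* A source only latches into IIR if it is unmasked in IMR; it only raises
 * the interrupt line if it is also enabled in IER.  Real hardware asserts
 * the line while any enabled IIR bit is set; this model just reports
 * whether the newly latched source is enabled. */
static int raise_event(struct fake_regs *r, uint32_t event)
{
	if (event & r->imr)
		return 0;			/* masked: dropped in this model */
	r->iir |= event;
	return (event & r->ier) != 0;
}

int main(void)
{
	struct fake_regs regs = { 0 };
	uint32_t irq_mask, enable_mask;

	/* Mirror of the postinstall pattern: unmask the sources we always
	 * want to see, but keep flip-pending out of the IER enable mask. */
	irq_mask = ~(ASLE_BIT | PIPE_A_BIT | PIPE_B_BIT | FLIP_A_BIT | USER_BIT);
	enable_mask = ~irq_mask & ~FLIP_A_BIT;

	regs.imr = irq_mask;
	regs.ier = enable_mask;

	printf("user interrupt fires: %d\n", raise_event(&regs, USER_BIT));   /* 1 */
	printf("flip pending fires:   %d\n", raise_event(&regs, FLIP_A_BIT)); /* 0 */
	printf("IIR = 0x%08x\n", regs.iir);	/* 0x18: flip bit latched anyway */
	return 0;
}

This is also why i915_handle_vblank above polls ISR for the flip-pending bit on each vblank instead of waiting for a dedicated flip interrupt.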
/* Note HDMI and DP share hotplug bits */ 3542e5868a31SEgbert Eich /* enable bits are the same for all generations */ 3543cd569aedSEgbert Eich list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3544cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3545cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3546a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 3547a266c7d5SChris Wilson to generate a spurious hotplug event about three 3548a266c7d5SChris Wilson seconds later. So just do it once. 3549a266c7d5SChris Wilson */ 3550a266c7d5SChris Wilson if (IS_G4X(dev)) 3551a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 355285fc95baSDaniel Vetter hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3553a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3554a266c7d5SChris Wilson 3555a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 3556a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3557a266c7d5SChris Wilson } 3558bac56d5bSEgbert Eich } 3559a266c7d5SChris Wilson 3560ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 3561a266c7d5SChris Wilson { 3562a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3563a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3564a266c7d5SChris Wilson u32 iir, new_iir; 3565a266c7d5SChris Wilson u32 pipe_stats[I915_MAX_PIPES]; 3566a266c7d5SChris Wilson unsigned long irqflags; 3567a266c7d5SChris Wilson int irq_received; 3568a266c7d5SChris Wilson int ret = IRQ_NONE, pipe; 356921ad8330SVille Syrjälä u32 flip_mask = 357021ad8330SVille Syrjälä I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 357121ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3572a266c7d5SChris Wilson 3573a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 3574a266c7d5SChris Wilson 3575a266c7d5SChris Wilson iir = I915_READ(IIR); 3576a266c7d5SChris Wilson 3577a266c7d5SChris Wilson for (;;) { 35782c8ba29fSChris Wilson bool blc_event = false; 35792c8ba29fSChris Wilson 358021ad8330SVille Syrjälä irq_received = (iir & ~flip_mask) != 0; 3581a266c7d5SChris Wilson 3582a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3583a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 3584a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 3585a266c7d5SChris Wilson * interrupts (for non-MSI). 
3586a266c7d5SChris Wilson */ 3587a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3588a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3589a266c7d5SChris Wilson i915_handle_error(dev, false); 3590a266c7d5SChris Wilson 3591a266c7d5SChris Wilson for_each_pipe(pipe) { 3592a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 3593a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3594a266c7d5SChris Wilson 3595a266c7d5SChris Wilson /* 3596a266c7d5SChris Wilson * Clear the PIPE*STAT regs before the IIR 3597a266c7d5SChris Wilson */ 3598a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3599a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3600a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3601a266c7d5SChris Wilson pipe_name(pipe)); 3602a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 3603a266c7d5SChris Wilson irq_received = 1; 3604a266c7d5SChris Wilson } 3605a266c7d5SChris Wilson } 3606a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3607a266c7d5SChris Wilson 3608a266c7d5SChris Wilson if (!irq_received) 3609a266c7d5SChris Wilson break; 3610a266c7d5SChris Wilson 3611a266c7d5SChris Wilson ret = IRQ_HANDLED; 3612a266c7d5SChris Wilson 3613a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 3614adca4730SChris Wilson if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3615a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3616b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3617b543fb04SEgbert Eich HOTPLUG_INT_STATUS_G4X : 36184f7fd709SDaniel Vetter HOTPLUG_INT_STATUS_I915); 3619a266c7d5SChris Wilson 3620a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3621a266c7d5SChris Wilson hotplug_status); 362291d131d2SDaniel Vetter 362310a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, 362410a504deSDaniel Vetter IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915); 362591d131d2SDaniel Vetter 3626a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3627a266c7d5SChris Wilson I915_READ(PORT_HOTPLUG_STAT); 3628a266c7d5SChris Wilson } 3629a266c7d5SChris Wilson 363021ad8330SVille Syrjälä I915_WRITE(IIR, iir & ~flip_mask); 3631a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3632a266c7d5SChris Wilson 3633a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3634a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3635a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 3636a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[VCS]); 3637a266c7d5SChris Wilson 3638a266c7d5SChris Wilson for_each_pipe(pipe) { 36392c8ba29fSChris Wilson if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 364090a72f87SVille Syrjälä i915_handle_vblank(dev, pipe, pipe, iir)) 364190a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3642a266c7d5SChris Wilson 3643a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3644a266c7d5SChris Wilson blc_event = true; 36454356d586SDaniel Vetter 36464356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3647277de95eSDaniel Vetter i9xx_pipe_crc_irq_handler(dev, pipe); 3648a266c7d5SChris Wilson } 3649a266c7d5SChris Wilson 3650a266c7d5SChris Wilson 3651a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3652a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3653a266c7d5SChris Wilson 3654515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3655515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 3656515ac2bbSDaniel Vetter 3657a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3658a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3659a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3660a266c7d5SChris Wilson * we would never get another interrupt. 3661a266c7d5SChris Wilson * 3662a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3663a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3664a266c7d5SChris Wilson * another one. 3665a266c7d5SChris Wilson * 3666a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3667a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3668a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3669a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3670a266c7d5SChris Wilson * stray interrupts. 
3671a266c7d5SChris Wilson */ 3672a266c7d5SChris Wilson iir = new_iir; 3673a266c7d5SChris Wilson } 3674a266c7d5SChris Wilson 3675d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 36762c8ba29fSChris Wilson 3677a266c7d5SChris Wilson return ret; 3678a266c7d5SChris Wilson } 3679a266c7d5SChris Wilson 3680a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev) 3681a266c7d5SChris Wilson { 3682a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3683a266c7d5SChris Wilson int pipe; 3684a266c7d5SChris Wilson 3685a266c7d5SChris Wilson if (!dev_priv) 3686a266c7d5SChris Wilson return; 3687a266c7d5SChris Wilson 3688ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3689ac4c16c5SEgbert Eich 3690a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3691a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3692a266c7d5SChris Wilson 3693a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xffffffff); 3694a266c7d5SChris Wilson for_each_pipe(pipe) 3695a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3696a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3697a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3698a266c7d5SChris Wilson 3699a266c7d5SChris Wilson for_each_pipe(pipe) 3700a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 3701a266c7d5SChris Wilson I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3702a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3703a266c7d5SChris Wilson } 3704a266c7d5SChris Wilson 3705ac4c16c5SEgbert Eich static void i915_reenable_hotplug_timer_func(unsigned long data) 3706ac4c16c5SEgbert Eich { 3707ac4c16c5SEgbert Eich drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3708ac4c16c5SEgbert Eich struct drm_device *dev = dev_priv->dev; 3709ac4c16c5SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3710ac4c16c5SEgbert Eich unsigned long irqflags; 3711ac4c16c5SEgbert Eich int i; 3712ac4c16c5SEgbert Eich 3713ac4c16c5SEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3714ac4c16c5SEgbert Eich for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3715ac4c16c5SEgbert Eich struct drm_connector *connector; 3716ac4c16c5SEgbert Eich 3717ac4c16c5SEgbert Eich if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3718ac4c16c5SEgbert Eich continue; 3719ac4c16c5SEgbert Eich 3720ac4c16c5SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3721ac4c16c5SEgbert Eich 3722ac4c16c5SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3723ac4c16c5SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3724ac4c16c5SEgbert Eich 3725ac4c16c5SEgbert Eich if (intel_connector->encoder->hpd_pin == i) { 3726ac4c16c5SEgbert Eich if (connector->polled != intel_connector->polled) 3727ac4c16c5SEgbert Eich DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3728ac4c16c5SEgbert Eich drm_get_connector_name(connector)); 3729ac4c16c5SEgbert Eich connector->polled = intel_connector->polled; 3730ac4c16c5SEgbert Eich if (!connector->polled) 3731ac4c16c5SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3732ac4c16c5SEgbert Eich } 3733ac4c16c5SEgbert Eich } 3734ac4c16c5SEgbert Eich } 3735ac4c16c5SEgbert Eich if (dev_priv->display.hpd_irq_setup) 3736ac4c16c5SEgbert Eich dev_priv->display.hpd_irq_setup(dev); 3737ac4c16c5SEgbert Eich spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3738ac4c16c5SEgbert Eich } 3739ac4c16c5SEgbert Eich 3740f71d4af4SJesse Barnes void intel_irq_init(struct drm_device *dev) 3741f71d4af4SJesse 
Barnes { 37428b2e326dSChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 37438b2e326dSChris Wilson 37448b2e326dSChris Wilson INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 374599584db3SDaniel Vetter INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3746c6a828d3SDaniel Vetter INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3747a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 37488b2e326dSChris Wilson 374999584db3SDaniel Vetter setup_timer(&dev_priv->gpu_error.hangcheck_timer, 375099584db3SDaniel Vetter i915_hangcheck_elapsed, 375161bac78eSDaniel Vetter (unsigned long) dev); 3752ac4c16c5SEgbert Eich setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3753ac4c16c5SEgbert Eich (unsigned long) dev_priv); 375461bac78eSDaniel Vetter 375597a19a24STomas Janousek pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 37569ee32feaSDaniel Vetter 37574cdb83ecSVille Syrjälä if (IS_GEN2(dev)) { 37584cdb83ecSVille Syrjälä dev->max_vblank_count = 0; 37594cdb83ecSVille Syrjälä dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 37604cdb83ecSVille Syrjälä } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3761f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3762f71d4af4SJesse Barnes dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3763391f75e2SVille Syrjälä } else { 3764391f75e2SVille Syrjälä dev->driver->get_vblank_counter = i915_get_vblank_counter; 3765391f75e2SVille Syrjälä dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3766f71d4af4SJesse Barnes } 3767f71d4af4SJesse Barnes 3768c2baf4b7SVille Syrjälä if (drm_core_check_feature(dev, DRIVER_MODESET)) { 3769f71d4af4SJesse Barnes dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3770f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3771c2baf4b7SVille Syrjälä } 3772f71d4af4SJesse Barnes 37737e231dbeSJesse Barnes if (IS_VALLEYVIEW(dev)) { 37747e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 37757e231dbeSJesse Barnes dev->driver->irq_preinstall = valleyview_irq_preinstall; 37767e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 37777e231dbeSJesse Barnes dev->driver->irq_uninstall = valleyview_irq_uninstall; 37787e231dbeSJesse Barnes dev->driver->enable_vblank = valleyview_enable_vblank; 37797e231dbeSJesse Barnes dev->driver->disable_vblank = valleyview_disable_vblank; 3780fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3781abd58f01SBen Widawsky } else if (IS_GEN8(dev)) { 3782abd58f01SBen Widawsky dev->driver->irq_handler = gen8_irq_handler; 3783abd58f01SBen Widawsky dev->driver->irq_preinstall = gen8_irq_preinstall; 3784abd58f01SBen Widawsky dev->driver->irq_postinstall = gen8_irq_postinstall; 3785abd58f01SBen Widawsky dev->driver->irq_uninstall = gen8_irq_uninstall; 3786abd58f01SBen Widawsky dev->driver->enable_vblank = gen8_enable_vblank; 3787abd58f01SBen Widawsky dev->driver->disable_vblank = gen8_disable_vblank; 3788abd58f01SBen Widawsky dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3789f71d4af4SJesse Barnes } else if (HAS_PCH_SPLIT(dev)) { 3790f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 3791f71d4af4SJesse Barnes dev->driver->irq_preinstall = ironlake_irq_preinstall; 3792f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 3793f71d4af4SJesse Barnes 
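Editorial aside: the generation checks just above set dev->max_vblank_count to 0 on gen2 (no usable hardware frame counter), to the full 32 bits on G4X and gen5+, and to only 24 bits of frame count otherwise. The sketch below illustrates why that width matters when differencing two raw counter readings across a wrap; vblank_delta is a made-up helper for this illustration, not a DRM or i915 function.

#include <stdint.h>
#include <stdio.h>

/* max_count is all-ones for the counter width (0xffffff or 0xffffffff),
 * so masking the unsigned subtraction handles wraparound for free. */
static uint32_t vblank_delta(uint32_t last, uint32_t now, uint32_t max_count)
{
	return (now - last) & max_count;
}

int main(void)
{
	uint32_t max24 = 0xffffff;	/* pre-G4X: 24-bit frame counter */
	uint32_t max32 = 0xffffffff;	/* G4X and gen5+: full 32-bit counter */

	/* A wrap from 0xfffffe to 0x000003 is still a small forward step. */
	printf("24-bit wrap delta: %u\n", vblank_delta(0xfffffe, 0x000003, max24)); /* 5 */
	printf("32-bit delta:      %u\n", vblank_delta(100, 160, max32));           /* 60 */
	return 0;
}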
dev->driver->irq_uninstall = ironlake_irq_uninstall; 3794f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 3795f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 379682a28bcfSDaniel Vetter dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3797f71d4af4SJesse Barnes } else { 3798c2798b19SChris Wilson if (INTEL_INFO(dev)->gen == 2) { 3799c2798b19SChris Wilson dev->driver->irq_preinstall = i8xx_irq_preinstall; 3800c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 3801c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 3802c2798b19SChris Wilson dev->driver->irq_uninstall = i8xx_irq_uninstall; 3803a266c7d5SChris Wilson } else if (INTEL_INFO(dev)->gen == 3) { 3804a266c7d5SChris Wilson dev->driver->irq_preinstall = i915_irq_preinstall; 3805a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 3806a266c7d5SChris Wilson dev->driver->irq_uninstall = i915_irq_uninstall; 3807a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 380820afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3809c2798b19SChris Wilson } else { 3810a266c7d5SChris Wilson dev->driver->irq_preinstall = i965_irq_preinstall; 3811a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 3812a266c7d5SChris Wilson dev->driver->irq_uninstall = i965_irq_uninstall; 3813a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 3814bac56d5bSEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3815c2798b19SChris Wilson } 3816f71d4af4SJesse Barnes dev->driver->enable_vblank = i915_enable_vblank; 3817f71d4af4SJesse Barnes dev->driver->disable_vblank = i915_disable_vblank; 3818f71d4af4SJesse Barnes } 3819f71d4af4SJesse Barnes } 382020afbda2SDaniel Vetter 382120afbda2SDaniel Vetter void intel_hpd_init(struct drm_device *dev) 382220afbda2SDaniel Vetter { 382320afbda2SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 3824821450c6SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3825821450c6SEgbert Eich struct drm_connector *connector; 3826b5ea2d56SDaniel Vetter unsigned long irqflags; 3827821450c6SEgbert Eich int i; 382820afbda2SDaniel Vetter 3829821450c6SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 3830821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 3831821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3832821450c6SEgbert Eich } 3833821450c6SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3834821450c6SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3835821450c6SEgbert Eich connector->polled = intel_connector->polled; 3836821450c6SEgbert Eich if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3837821450c6SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3838821450c6SEgbert Eich } 3839b5ea2d56SDaniel Vetter 3840b5ea2d56SDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3841b5ea2d56SDaniel Vetter * just to make the assert_spin_locked checks happy. 
*/ 3842b5ea2d56SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 384320afbda2SDaniel Vetter if (dev_priv->display.hpd_irq_setup) 384420afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 3845b5ea2d56SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 384620afbda2SDaniel Vetter } 3847c67a470bSPaulo Zanoni 3848c67a470bSPaulo Zanoni /* Disable interrupts so we can allow Package C8+. */ 3849c67a470bSPaulo Zanoni void hsw_pc8_disable_interrupts(struct drm_device *dev) 3850c67a470bSPaulo Zanoni { 3851c67a470bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 3852c67a470bSPaulo Zanoni unsigned long irqflags; 3853c67a470bSPaulo Zanoni 3854c67a470bSPaulo Zanoni spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3855c67a470bSPaulo Zanoni 3856c67a470bSPaulo Zanoni dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); 3857c67a470bSPaulo Zanoni dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); 3858c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); 3859c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gtier = I915_READ(GTIER); 3860c67a470bSPaulo Zanoni dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); 3861c67a470bSPaulo Zanoni 3862c67a470bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); 3863c67a470bSPaulo Zanoni ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); 3864c67a470bSPaulo Zanoni ilk_disable_gt_irq(dev_priv, 0xffffffff); 3865c67a470bSPaulo Zanoni snb_disable_pm_irq(dev_priv, 0xffffffff); 3866c67a470bSPaulo Zanoni 3867c67a470bSPaulo Zanoni dev_priv->pc8.irqs_disabled = true; 3868c67a470bSPaulo Zanoni 3869c67a470bSPaulo Zanoni spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3870c67a470bSPaulo Zanoni } 3871c67a470bSPaulo Zanoni 3872c67a470bSPaulo Zanoni /* Restore interrupts so we can recover from Package C8+. 
*/ 3873c67a470bSPaulo Zanoni void hsw_pc8_restore_interrupts(struct drm_device *dev) 3874c67a470bSPaulo Zanoni { 3875c67a470bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 3876c67a470bSPaulo Zanoni unsigned long irqflags; 3877c67a470bSPaulo Zanoni uint32_t val, expected; 3878c67a470bSPaulo Zanoni 3879c67a470bSPaulo Zanoni spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3880c67a470bSPaulo Zanoni 3881c67a470bSPaulo Zanoni val = I915_READ(DEIMR); 3882c67a470bSPaulo Zanoni expected = ~DE_PCH_EVENT_IVB; 3883c67a470bSPaulo Zanoni WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); 3884c67a470bSPaulo Zanoni 3885c67a470bSPaulo Zanoni val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; 3886c67a470bSPaulo Zanoni expected = ~SDE_HOTPLUG_MASK_CPT; 3887c67a470bSPaulo Zanoni WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", 3888c67a470bSPaulo Zanoni val, expected); 3889c67a470bSPaulo Zanoni 3890c67a470bSPaulo Zanoni val = I915_READ(GTIMR); 3891c67a470bSPaulo Zanoni expected = 0xffffffff; 3892c67a470bSPaulo Zanoni WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); 3893c67a470bSPaulo Zanoni 3894c67a470bSPaulo Zanoni val = I915_READ(GEN6_PMIMR); 3895c67a470bSPaulo Zanoni expected = 0xffffffff; 3896c67a470bSPaulo Zanoni WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, 3897c67a470bSPaulo Zanoni expected); 3898c67a470bSPaulo Zanoni 3899c67a470bSPaulo Zanoni dev_priv->pc8.irqs_disabled = false; 3900c67a470bSPaulo Zanoni 3901c67a470bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); 3902c67a470bSPaulo Zanoni ibx_enable_display_interrupt(dev_priv, 3903c67a470bSPaulo Zanoni ~dev_priv->pc8.regsave.sdeimr & 3904c67a470bSPaulo Zanoni ~SDE_HOTPLUG_MASK_CPT); 3905c67a470bSPaulo Zanoni ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); 3906c67a470bSPaulo Zanoni snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); 3907c67a470bSPaulo Zanoni I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); 3908c67a470bSPaulo Zanoni 3909c67a470bSPaulo Zanoni spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3910c67a470bSPaulo Zanoni } 3911
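Closing editorial aside: hsw_pc8_disable_interrupts() and hsw_pc8_restore_interrupts() above save DEIMR/SDEIMR/GTIMR/GTIER/GEN6_PMIMR into pc8.regsave, mask everything while the hardware sits in Package C8+, and write the saved values back on exit, flipping pc8.irqs_disabled around the whole sequence. The sketch below is a minimal user-space model of that save/disable/restore pattern reduced to one generic mask register; fake_irq_state, enable_irq_bits, pc8_disable and pc8_restore are invented names, and the deferred-enable branch is an assumption about how such a flag is typically consumed, not a statement of Haswell register behaviour.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_irq_state {
	uint32_t imr;		/* stands in for a live interrupt mask register */
	uint32_t saved_imr;	/* copy taken when entering the low-power state */
	bool irqs_disabled;
};

/* Enable request (clear mask bits).  While irqs_disabled is set, fold the
 * change into the saved copy so the restore path replays it later. */
static void enable_irq_bits(struct fake_irq_state *s, uint32_t bits)
{
	if (s->irqs_disabled) {
		s->saved_imr &= ~bits;
		return;
	}
	s->imr &= ~bits;
}

static void pc8_disable(struct fake_irq_state *s)
{
	s->saved_imr = s->imr;
	s->imr = 0xffffffff;		/* mask everything while powered down */
	s->irqs_disabled = true;
}

static void pc8_restore(struct fake_irq_state *s)
{
	s->irqs_disabled = false;
	s->imr = s->saved_imr;		/* replay saved state, incl. deferred enables */
}

int main(void)
{
	struct fake_irq_state s = { .imr = 0xfffffff0 };	/* bits 0-3 enabled */

	pc8_disable(&s);
	enable_irq_bits(&s, 1u << 8);	/* request arrives while disabled */
	pc8_restore(&s);

	printf("imr after restore: 0x%08x\n", s.imr);	/* 0xfffffef0 */
	return 0;
}

The point of the deferral is that an enable request made while the registers are powered down is not lost: it lands in the saved copy and takes effect as part of the restore.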