/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
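
/*
 * Note on the helpers above: in DEIMR/GTIMR/GEN6_PMIMR a set bit masks
 * (blocks) the corresponding interrupt, which is why the *_enable_* variants
 * clear bits in the cached mask and the *_disable_* variants set them before
 * writing the register; e.g. ilk_enable_gt_irq(dev_priv, bit) ends up
 * clearing that bit in GTIMR so the interrupt can be delivered.
 */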

static void snb_set_pm_irq(struct drm_i915_private *dev_priv, uint32_t val)
{
	snb_update_pm_irq(dev_priv, 0xffffffff, ~val);
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = I915_L3_PARITY_UEVENT "=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
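
/*
 * Note: dp_aux_irq_handler() below wakes the same gmbus_wait_queue as
 * gmbus_irq_handler() above; at this point in the driver GMBUS and DP AUX
 * completion waiters share that single wait queue.
 */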

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_set_pm_irq(dev_priv, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

Barnes /* 10937e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 10947e231dbeSJesse Barnes */ 10957e231dbeSJesse Barnes if (pipe_stats[pipe] & 0x8000ffff) { 10967e231dbeSJesse Barnes if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 10977e231dbeSJesse Barnes DRM_DEBUG_DRIVER("pipe %c underrun\n", 10987e231dbeSJesse Barnes pipe_name(pipe)); 10997e231dbeSJesse Barnes I915_WRITE(reg, pipe_stats[pipe]); 11007e231dbeSJesse Barnes } 11017e231dbeSJesse Barnes } 11027e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 11037e231dbeSJesse Barnes 110431acc7f5SJesse Barnes for_each_pipe(pipe) { 110531acc7f5SJesse Barnes if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 110631acc7f5SJesse Barnes drm_handle_vblank(dev, pipe); 110731acc7f5SJesse Barnes 110831acc7f5SJesse Barnes if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 110931acc7f5SJesse Barnes intel_prepare_page_flip(dev, pipe); 111031acc7f5SJesse Barnes intel_finish_page_flip(dev, pipe); 111131acc7f5SJesse Barnes } 111231acc7f5SJesse Barnes } 111331acc7f5SJesse Barnes 11147e231dbeSJesse Barnes /* Consume port. Then clear IIR or we'll miss events */ 11157e231dbeSJesse Barnes if (iir & I915_DISPLAY_PORT_INTERRUPT) { 11167e231dbeSJesse Barnes u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1117b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 11187e231dbeSJesse Barnes 11197e231dbeSJesse Barnes DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 11207e231dbeSJesse Barnes hotplug_status); 112191d131d2SDaniel Vetter 112210a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 112391d131d2SDaniel Vetter 11247e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 11257e231dbeSJesse Barnes I915_READ(PORT_HOTPLUG_STAT); 11267e231dbeSJesse Barnes } 11277e231dbeSJesse Barnes 1128515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1129515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 11307e231dbeSJesse Barnes 11314848405cSBen Widawsky if (pm_iir & GEN6_PM_RPS_EVENTS) 1132d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 11337e231dbeSJesse Barnes 11347e231dbeSJesse Barnes I915_WRITE(GTIIR, gt_iir); 11357e231dbeSJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 11367e231dbeSJesse Barnes I915_WRITE(VLV_IIR, iir); 11377e231dbeSJesse Barnes } 11387e231dbeSJesse Barnes 11397e231dbeSJesse Barnes out: 11407e231dbeSJesse Barnes return ret; 11417e231dbeSJesse Barnes } 11427e231dbeSJesse Barnes 114323e81d69SAdam Jackson static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1144776ad806SJesse Barnes { 1145776ad806SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 11469db4a9c7SJesse Barnes int pipe; 1147b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1148776ad806SJesse Barnes 114910a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 115091d131d2SDaniel Vetter 1151cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 1152cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1153776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 1154cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1155cfc33bf7SVille Syrjälä port_name(port)); 1156cfc33bf7SVille Syrjälä } 1157776ad806SJesse Barnes 1158ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 1159ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1160ce99c256SDaniel Vetter 1161776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 
1162515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 1163776ad806SJesse Barnes 1164776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 1165776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1166776ad806SJesse Barnes 1167776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 1168776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1169776ad806SJesse Barnes 1170776ad806SJesse Barnes if (pch_iir & SDE_POISON) 1171776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 1172776ad806SJesse Barnes 11739db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 11749db4a9c7SJesse Barnes for_each_pipe(pipe) 11759db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 11769db4a9c7SJesse Barnes pipe_name(pipe), 11779db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1178776ad806SJesse Barnes 1179776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1180776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1181776ad806SJesse Barnes 1182776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1183776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1184776ad806SJesse Barnes 1185776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 11868664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 11878664281bSPaulo Zanoni false)) 11888664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 11898664281bSPaulo Zanoni 11908664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 11918664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 11928664281bSPaulo Zanoni false)) 11938664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 11948664281bSPaulo Zanoni } 11958664281bSPaulo Zanoni 11968664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 11978664281bSPaulo Zanoni { 11988664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 11998664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 12008664281bSPaulo Zanoni 1201de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1202de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1203de032bf4SPaulo Zanoni 12048664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_A) 12058664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 12068664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 12078664281bSPaulo Zanoni 12088664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_B) 12098664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 12108664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 12118664281bSPaulo Zanoni 12128664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_C) 12138664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 12148664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 12158664281bSPaulo Zanoni 12168664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 12178664281bSPaulo Zanoni } 12188664281bSPaulo Zanoni 12198664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 12208664281bSPaulo Zanoni { 12218664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 12228664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 12238664281bSPaulo Zanoni 1224de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1225de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1226de032bf4SPaulo Zanoni 
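	/*
	 * Note: each transcoder FIFO underrun bit below is handled by turning
	 * further underrun reporting for that transcoder off via
	 * intel_set_pch_fifo_underrun_reporting(..., false), so only the
	 * first underrun is logged instead of generating an interrupt storm.
	 * The sticky SERR_INT bits are then cleared at the end of the
	 * function by writing the value that was read back to the register.
	 */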
12278664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 12288664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 12298664281bSPaulo Zanoni false)) 12308664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 12318664281bSPaulo Zanoni 12328664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 12338664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 12348664281bSPaulo Zanoni false)) 12358664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 12368664281bSPaulo Zanoni 12378664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 12388664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 12398664281bSPaulo Zanoni false)) 12408664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 12418664281bSPaulo Zanoni 12428664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1243776ad806SJesse Barnes } 1244776ad806SJesse Barnes 124523e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 124623e81d69SAdam Jackson { 124723e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 124823e81d69SAdam Jackson int pipe; 1249b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 125023e81d69SAdam Jackson 125110a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 125291d131d2SDaniel Vetter 1253cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1254cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 125523e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1256cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1257cfc33bf7SVille Syrjälä port_name(port)); 1258cfc33bf7SVille Syrjälä } 125923e81d69SAdam Jackson 126023e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1261ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 126223e81d69SAdam Jackson 126323e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1264515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 126523e81d69SAdam Jackson 126623e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 126723e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 126823e81d69SAdam Jackson 126923e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 127023e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 127123e81d69SAdam Jackson 127223e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 127323e81d69SAdam Jackson for_each_pipe(pipe) 127423e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 127523e81d69SAdam Jackson pipe_name(pipe), 127623e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 12778664281bSPaulo Zanoni 12788664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 12798664281bSPaulo Zanoni cpt_serr_int_handler(dev); 128023e81d69SAdam Jackson } 128123e81d69SAdam Jackson 1282c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1283c008bc6eSPaulo Zanoni { 1284c008bc6eSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 1285c008bc6eSPaulo Zanoni 1286c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 1287c008bc6eSPaulo Zanoni dp_aux_irq_handler(dev); 1288c008bc6eSPaulo Zanoni 1289c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 1290c008bc6eSPaulo Zanoni intel_opregion_asle_intr(dev); 1291c008bc6eSPaulo Zanoni 1292c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_VBLANK) 1293c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 0); 1294c008bc6eSPaulo 
Zanoni 1295c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_VBLANK) 1296c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 1); 1297c008bc6eSPaulo Zanoni 1298c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 1299c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1300c008bc6eSPaulo Zanoni 1301c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1302c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1303c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 1304c008bc6eSPaulo Zanoni 1305c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1306c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1307c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1308c008bc6eSPaulo Zanoni 1309c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEA_FLIP_DONE) { 1310c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 0); 1311c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 0); 1312c008bc6eSPaulo Zanoni } 1313c008bc6eSPaulo Zanoni 1314c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEB_FLIP_DONE) { 1315c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 1); 1316c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 1); 1317c008bc6eSPaulo Zanoni } 1318c008bc6eSPaulo Zanoni 1319c008bc6eSPaulo Zanoni /* check event from PCH */ 1320c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 1321c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 1322c008bc6eSPaulo Zanoni 1323c008bc6eSPaulo Zanoni if (HAS_PCH_CPT(dev)) 1324c008bc6eSPaulo Zanoni cpt_irq_handler(dev, pch_iir); 1325c008bc6eSPaulo Zanoni else 1326c008bc6eSPaulo Zanoni ibx_irq_handler(dev, pch_iir); 1327c008bc6eSPaulo Zanoni 1328c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 1329c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 1330c008bc6eSPaulo Zanoni } 1331c008bc6eSPaulo Zanoni 1332c008bc6eSPaulo Zanoni if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1333c008bc6eSPaulo Zanoni ironlake_rps_change_irq_handler(dev); 1334c008bc6eSPaulo Zanoni } 1335c008bc6eSPaulo Zanoni 13369719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 13379719fb98SPaulo Zanoni { 13389719fb98SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 13399719fb98SPaulo Zanoni int i; 13409719fb98SPaulo Zanoni 13419719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 13429719fb98SPaulo Zanoni ivb_err_int_handler(dev); 13439719fb98SPaulo Zanoni 13449719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 13459719fb98SPaulo Zanoni dp_aux_irq_handler(dev); 13469719fb98SPaulo Zanoni 13479719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 13489719fb98SPaulo Zanoni intel_opregion_asle_intr(dev); 13499719fb98SPaulo Zanoni 13509719fb98SPaulo Zanoni for (i = 0; i < 3; i++) { 13519719fb98SPaulo Zanoni if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 13529719fb98SPaulo Zanoni drm_handle_vblank(dev, i); 13539719fb98SPaulo Zanoni if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 13549719fb98SPaulo Zanoni intel_prepare_page_flip(dev, i); 13559719fb98SPaulo Zanoni intel_finish_page_flip_plane(dev, i); 13569719fb98SPaulo Zanoni } 13579719fb98SPaulo Zanoni } 13589719fb98SPaulo Zanoni 13599719fb98SPaulo Zanoni /* check event from PCH */ 13609719fb98SPaulo Zanoni if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 13619719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 13629719fb98SPaulo Zanoni 13639719fb98SPaulo Zanoni cpt_irq_handler(dev, pch_iir); 13649719fb98SPaulo Zanoni 13659719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 
13669719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 13679719fb98SPaulo Zanoni } 13689719fb98SPaulo Zanoni } 13699719fb98SPaulo Zanoni 1370f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1371b1f14ad0SJesse Barnes { 1372b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1373b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1374f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 13750e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 1376*333a8204SPaulo Zanoni bool err_int_reenable = false; 1377b1f14ad0SJesse Barnes 1378b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1379b1f14ad0SJesse Barnes 13808664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 13818664281bSPaulo Zanoni * do any I915_{READ,WRITE}. */ 1382907b28c5SChris Wilson intel_uncore_check_errors(dev); 13838664281bSPaulo Zanoni 1384b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1385b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1386b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 138723a78516SPaulo Zanoni POSTING_READ(DEIER); 13880e43406bSChris Wilson 138944498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 139044498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 139144498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 139244498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 139344498aeaSPaulo Zanoni * due to its back queue). */ 1394ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 139544498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 139644498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 139744498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1398ab5c608bSBen Widawsky } 139944498aeaSPaulo Zanoni 14008664281bSPaulo Zanoni /* On Haswell, also mask ERR_INT because we don't want to risk 14018664281bSPaulo Zanoni * generating "unclaimed register" interrupts from inside the interrupt 14028664281bSPaulo Zanoni * handler.
*/ 14034bc9d430SDaniel Vetter if (IS_HASWELL(dev)) { 14044bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 1405*333a8204SPaulo Zanoni err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; 1406*333a8204SPaulo Zanoni if (err_int_reenable) 14078664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 14084bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 14094bc9d430SDaniel Vetter } 14108664281bSPaulo Zanoni 14110e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 14120e43406bSChris Wilson if (gt_iir) { 1413d8fc8a47SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) 14140e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 1415d8fc8a47SPaulo Zanoni else 1416d8fc8a47SPaulo Zanoni ilk_gt_irq_handler(dev, dev_priv, gt_iir); 14170e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 14180e43406bSChris Wilson ret = IRQ_HANDLED; 14190e43406bSChris Wilson } 1420b1f14ad0SJesse Barnes 1421b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 14220e43406bSChris Wilson if (de_iir) { 1423f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) 14249719fb98SPaulo Zanoni ivb_display_irq_handler(dev, de_iir); 1425f1af8fc1SPaulo Zanoni else 1426f1af8fc1SPaulo Zanoni ilk_display_irq_handler(dev, de_iir); 14270e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 14280e43406bSChris Wilson ret = IRQ_HANDLED; 14290e43406bSChris Wilson } 14300e43406bSChris Wilson 1431f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) { 1432f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 14330e43406bSChris Wilson if (pm_iir) { 1434baf02a1fSBen Widawsky if (IS_HASWELL(dev)) 1435baf02a1fSBen Widawsky hsw_pm_irq_handler(dev_priv, pm_iir); 14364848405cSBen Widawsky else if (pm_iir & GEN6_PM_RPS_EVENTS) 1437d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 1438b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 14390e43406bSChris Wilson ret = IRQ_HANDLED; 14400e43406bSChris Wilson } 1441f1af8fc1SPaulo Zanoni } 1442b1f14ad0SJesse Barnes 1443*333a8204SPaulo Zanoni if (err_int_reenable) { 14444bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 14454bc9d430SDaniel Vetter if (ivb_can_enable_err_int(dev)) 14468664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 14474bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 14484bc9d430SDaniel Vetter } 14498664281bSPaulo Zanoni 1450b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1451b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1452ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 145344498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 145444498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1455ab5c608bSBen Widawsky } 1456b1f14ad0SJesse Barnes 1457b1f14ad0SJesse Barnes return ret; 1458b1f14ad0SJesse Barnes } 1459b1f14ad0SJesse Barnes 14608a905236SJesse Barnes /** 14618a905236SJesse Barnes * i915_error_work_func - do process context error handling work 14628a905236SJesse Barnes * @work: work struct 14638a905236SJesse Barnes * 14648a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 14658a905236SJesse Barnes * was detected. 
14668a905236SJesse Barnes */ 14678a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 14688a905236SJesse Barnes { 14691f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 14701f83fee0SDaniel Vetter work); 14711f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 14721f83fee0SDaniel Vetter gpu_error); 14738a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1474f69061beSDaniel Vetter struct intel_ring_buffer *ring; 1475cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1476cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1477cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1478f69061beSDaniel Vetter int i, ret; 14798a905236SJesse Barnes 1480f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 14818a905236SJesse Barnes 14827db0ba24SDaniel Vetter /* 14837db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 14847db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 14857db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 14867db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 14877db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 14887db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 14897db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 14907db0ba24SDaniel Vetter * work we don't need to worry about any other races. 14917db0ba24SDaniel Vetter */ 14927db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 149344d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 14947db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 14957db0ba24SDaniel Vetter reset_event); 14961f83fee0SDaniel Vetter 1497f69061beSDaniel Vetter ret = i915_reset(dev); 1498f69061beSDaniel Vetter 1499f69061beSDaniel Vetter if (ret == 0) { 1500f69061beSDaniel Vetter /* 1501f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1502f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1503f69061beSDaniel Vetter * complete. 1504f69061beSDaniel Vetter * 1505f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1506f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1507f69061beSDaniel Vetter * updates before 1508f69061beSDaniel Vetter * the counter increment. 
1509f69061beSDaniel Vetter */ 1510f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1511f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1512f69061beSDaniel Vetter 1513f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1514f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 15151f83fee0SDaniel Vetter } else { 15161f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1517f316a42cSBen Gamari } 15181f83fee0SDaniel Vetter 1519f69061beSDaniel Vetter for_each_ring(ring, dev_priv, i) 1520f69061beSDaniel Vetter wake_up_all(&ring->irq_queue); 1521f69061beSDaniel Vetter 152296a02917SVille Syrjälä intel_display_handle_reset(dev); 152396a02917SVille Syrjälä 15241f83fee0SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 1525f316a42cSBen Gamari } 15268a905236SJesse Barnes } 15278a905236SJesse Barnes 152835aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1529c0e09200SDave Airlie { 15308a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 1531bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 153263eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 1533050ee91fSBen Widawsky int pipe, i; 153463eeaf38SJesse Barnes 153535aed2e6SChris Wilson if (!eir) 153635aed2e6SChris Wilson return; 153763eeaf38SJesse Barnes 1538a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 15398a905236SJesse Barnes 1540bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 1541bd9854f9SBen Widawsky 15428a905236SJesse Barnes if (IS_G4X(dev)) { 15438a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 15448a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 15458a905236SJesse Barnes 1546a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1547a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1548050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1549050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1550a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1551a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 15528a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 15533143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 15548a905236SJesse Barnes } 15558a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 15568a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1557a70491ccSJoe Perches pr_err("page table error\n"); 1558a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 15598a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 15603143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 15618a905236SJesse Barnes } 15628a905236SJesse Barnes } 15638a905236SJesse Barnes 1564a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 156563eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 156663eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1567a70491ccSJoe Perches pr_err("page table error\n"); 1568a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 156963eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 15703143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 157163eeaf38SJesse Barnes } 15728a905236SJesse Barnes } 15738a905236SJesse Barnes 157463eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 1575a70491ccSJoe Perches pr_err("memory refresh error:\n"); 15769db4a9c7SJesse Barnes for_each_pipe(pipe) 1577a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 15789db4a9c7SJesse Barnes pipe_name(pipe), 
I915_READ(PIPESTAT(pipe))); 157963eeaf38SJesse Barnes /* pipestat has already been acked */ 158063eeaf38SJesse Barnes } 158163eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 1582a70491ccSJoe Perches pr_err("instruction error\n"); 1583a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1584050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1585050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1586a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 158763eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 158863eeaf38SJesse Barnes 1589a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1590a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1591a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 159263eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 15933143a2bfSChris Wilson POSTING_READ(IPEIR); 159463eeaf38SJesse Barnes } else { 159563eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 159663eeaf38SJesse Barnes 1597a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1598a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1599a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1600a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 160163eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 16023143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 160363eeaf38SJesse Barnes } 160463eeaf38SJesse Barnes } 160563eeaf38SJesse Barnes 160663eeaf38SJesse Barnes I915_WRITE(EIR, eir); 16073143a2bfSChris Wilson POSTING_READ(EIR); 160863eeaf38SJesse Barnes eir = I915_READ(EIR); 160963eeaf38SJesse Barnes if (eir) { 161063eeaf38SJesse Barnes /* 161163eeaf38SJesse Barnes * some errors might have become stuck, 161263eeaf38SJesse Barnes * mask them. 161363eeaf38SJesse Barnes */ 161463eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 161563eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 161663eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 161763eeaf38SJesse Barnes } 161835aed2e6SChris Wilson } 161935aed2e6SChris Wilson 162035aed2e6SChris Wilson /** 162135aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 162235aed2e6SChris Wilson * @dev: drm device 162335aed2e6SChris Wilson * 162435aed2e6SChris Wilson * Do some basic checking of register state at error interrupt time and 162535aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 162635aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 162735aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 162835aed2e6SChris Wilson * of a ring dump etc.).
162935aed2e6SChris Wilson */ 1630527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 163135aed2e6SChris Wilson { 163235aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 1633b4519513SChris Wilson struct intel_ring_buffer *ring; 1634b4519513SChris Wilson int i; 163535aed2e6SChris Wilson 163635aed2e6SChris Wilson i915_capture_error_state(dev); 163735aed2e6SChris Wilson i915_report_and_clear_eir(dev); 16388a905236SJesse Barnes 1639ba1234d1SBen Gamari if (wedged) { 1640f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1641f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 1642ba1234d1SBen Gamari 164311ed50ecSBen Gamari /* 16441f83fee0SDaniel Vetter * Wakeup waiting processes so that the reset work item 16451f83fee0SDaniel Vetter * doesn't deadlock trying to grab various locks. 164611ed50ecSBen Gamari */ 1647b4519513SChris Wilson for_each_ring(ring, dev_priv, i) 1648b4519513SChris Wilson wake_up_all(&ring->irq_queue); 164911ed50ecSBen Gamari } 165011ed50ecSBen Gamari 165199584db3SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 16528a905236SJesse Barnes } 16538a905236SJesse Barnes 165421ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 16554e5359cdSSimon Farnsworth { 16564e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 16574e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 16584e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 165905394f39SChris Wilson struct drm_i915_gem_object *obj; 16604e5359cdSSimon Farnsworth struct intel_unpin_work *work; 16614e5359cdSSimon Farnsworth unsigned long flags; 16624e5359cdSSimon Farnsworth bool stall_detected; 16634e5359cdSSimon Farnsworth 16644e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 16654e5359cdSSimon Farnsworth if (intel_crtc == NULL) 16664e5359cdSSimon Farnsworth return; 16674e5359cdSSimon Farnsworth 16684e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 16694e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 16704e5359cdSSimon Farnsworth 1671e7d841caSChris Wilson if (work == NULL || 1672e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1673e7d841caSChris Wilson !work->enable_stall_check) { 16744e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 16754e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 16764e5359cdSSimon Farnsworth return; 16774e5359cdSSimon Farnsworth } 16784e5359cdSSimon Farnsworth 16794e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 168005394f39SChris Wilson obj = work->pending_flip_obj; 1681a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 16829db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 1683446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1684f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 16854e5359cdSSimon Farnsworth } else { 16869db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 1687f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 168801f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 16894e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 16904e5359cdSSimon Farnsworth } 16914e5359cdSSimon Farnsworth 16924e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 16934e5359cdSSimon Farnsworth 16944e5359cdSSimon Farnsworth if (stall_detected) { 16954e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 16964e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 16974e5359cdSSimon Farnsworth } 16984e5359cdSSimon Farnsworth } 16994e5359cdSSimon Farnsworth 170042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 170142f52ef8SKeith Packard * we use as a pipe index 170242f52ef8SKeith Packard */ 1703f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 17040a3e67a4SJesse Barnes { 17050a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1706e9d21d7fSKeith Packard unsigned long irqflags; 170771e0ffa5SJesse Barnes 17085eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 170971e0ffa5SJesse Barnes return -EINVAL; 17100a3e67a4SJesse Barnes 17111ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1712f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 17137c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 17147c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 17150a3e67a4SJesse Barnes else 17167c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 17177c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 17188692d00eSChris Wilson 17198692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 17208692d00eSChris Wilson if (dev_priv->info->gen == 3) 17216b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 17221ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 17238692d00eSChris Wilson 17240a3e67a4SJesse Barnes return 0; 17250a3e67a4SJesse Barnes } 17260a3e67a4SJesse Barnes 1727f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1728f796cf8fSJesse Barnes { 1729f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1730f796cf8fSJesse Barnes unsigned long irqflags; 1731b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1732b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1733f796cf8fSJesse Barnes 1734f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 1735f796cf8fSJesse Barnes return -EINVAL; 1736f796cf8fSJesse Barnes 1737f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1738b518421fSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 1739b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1740b1f14ad0SJesse Barnes 1741b1f14ad0SJesse Barnes return 0; 1742b1f14ad0SJesse Barnes } 1743b1f14ad0SJesse Barnes 17447e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 17457e231dbeSJesse Barnes { 17467e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 17477e231dbeSJesse Barnes unsigned long irqflags; 174831acc7f5SJesse Barnes u32 imr; 17497e231dbeSJesse Barnes 17507e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 17517e231dbeSJesse Barnes return -EINVAL; 17527e231dbeSJesse Barnes 17537e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 17547e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 175531acc7f5SJesse Barnes if (pipe == 0) 17567e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 175731acc7f5SJesse Barnes else 17587e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 17597e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 176031acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 176131acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 17627e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 17637e231dbeSJesse Barnes 17647e231dbeSJesse Barnes return 0; 17657e231dbeSJesse Barnes } 17667e231dbeSJesse Barnes 176742f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 176842f52ef8SKeith Packard * we use as a pipe index 176942f52ef8SKeith Packard */ 1770f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 17710a3e67a4SJesse Barnes { 17720a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1773e9d21d7fSKeith Packard unsigned long irqflags; 17740a3e67a4SJesse Barnes 17751ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 17768692d00eSChris Wilson if (dev_priv->info->gen == 3) 17776b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 17788692d00eSChris Wilson 17797c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 17807c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 17817c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 17821ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 17830a3e67a4SJesse Barnes } 17840a3e67a4SJesse Barnes 1785f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1786f796cf8fSJesse Barnes { 1787f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1788f796cf8fSJesse Barnes unsigned long irqflags; 1789b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1790b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1791f796cf8fSJesse Barnes 1792f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1793b518421fSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 1794b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1795b1f14ad0SJesse Barnes } 1796b1f14ad0SJesse Barnes 17977e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 17987e231dbeSJesse Barnes { 17997e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 18007e231dbeSJesse Barnes unsigned long irqflags; 180131acc7f5SJesse Barnes u32 imr; 18027e231dbeSJesse Barnes 18037e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 180431acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 180531acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 18067e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 180731acc7f5SJesse Barnes if (pipe == 0) 18087e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 180931acc7f5SJesse Barnes else 18107e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 18117e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 18127e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 18137e231dbeSJesse Barnes } 18147e231dbeSJesse Barnes 1815893eead0SChris Wilson static u32 1816893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 1817852835f3SZou Nan hai { 1818893eead0SChris Wilson return list_entry(ring->request_list.prev, 1819893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 1820893eead0SChris Wilson } 1821893eead0SChris Wilson 18229107e9d2SChris Wilson static bool 18239107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 1824893eead0SChris Wilson { 18259107e9d2SChris Wilson return (list_empty(&ring->request_list) || 18269107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 1827f65d9421SBen Gamari } 1828f65d9421SBen Gamari 18296274f212SChris Wilson static struct intel_ring_buffer * 18306274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 1831a24a11e6SChris Wilson { 1832a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 18336274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 1834a24a11e6SChris Wilson 1835a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1836a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 1837a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 18386274f212SChris Wilson return NULL; 1839a24a11e6SChris Wilson 1840a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 1841a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 
1842a24a11e6SChris Wilson */ 18436274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1844a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 1845a24a11e6SChris Wilson do { 1846a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 1847a24a11e6SChris Wilson if (cmd == ipehr) 1848a24a11e6SChris Wilson break; 1849a24a11e6SChris Wilson 1850a24a11e6SChris Wilson acthd -= 4; 1851a24a11e6SChris Wilson if (acthd < acthd_min) 18526274f212SChris Wilson return NULL; 1853a24a11e6SChris Wilson } while (1); 1854a24a11e6SChris Wilson 18556274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 18566274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1857a24a11e6SChris Wilson } 1858a24a11e6SChris Wilson 18596274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 18606274f212SChris Wilson { 18616274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 18626274f212SChris Wilson struct intel_ring_buffer *signaller; 18636274f212SChris Wilson u32 seqno, ctl; 18646274f212SChris Wilson 18656274f212SChris Wilson ring->hangcheck.deadlock = true; 18666274f212SChris Wilson 18676274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 18686274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 18696274f212SChris Wilson return -1; 18706274f212SChris Wilson 18716274f212SChris Wilson /* cursory check for an unkickable deadlock */ 18726274f212SChris Wilson ctl = I915_READ_CTL(signaller); 18736274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 18746274f212SChris Wilson return -1; 18756274f212SChris Wilson 18766274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 18776274f212SChris Wilson } 18786274f212SChris Wilson 18796274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 18806274f212SChris Wilson { 18816274f212SChris Wilson struct intel_ring_buffer *ring; 18826274f212SChris Wilson int i; 18836274f212SChris Wilson 18846274f212SChris Wilson for_each_ring(ring, dev_priv, i) 18856274f212SChris Wilson ring->hangcheck.deadlock = false; 18866274f212SChris Wilson } 18876274f212SChris Wilson 1888ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 1889ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 18901ec14ad3SChris Wilson { 18911ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 18921ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 18939107e9d2SChris Wilson u32 tmp; 18949107e9d2SChris Wilson 18956274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 1896f2f4d82fSJani Nikula return HANGCHECK_ACTIVE; 18976274f212SChris Wilson 18989107e9d2SChris Wilson if (IS_GEN2(dev)) 1899f2f4d82fSJani Nikula return HANGCHECK_HUNG; 19009107e9d2SChris Wilson 19019107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 19029107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 19039107e9d2SChris Wilson * and break the hang. This should work on 19049107e9d2SChris Wilson * all but the second generation chipsets. 
19059107e9d2SChris Wilson */ 19069107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 19071ec14ad3SChris Wilson if (tmp & RING_WAIT) { 19081ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 19091ec14ad3SChris Wilson ring->name); 19101ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 1911f2f4d82fSJani Nikula return HANGCHECK_KICK; 19121ec14ad3SChris Wilson } 1913a24a11e6SChris Wilson 19146274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 19156274f212SChris Wilson switch (semaphore_passed(ring)) { 19166274f212SChris Wilson default: 1917f2f4d82fSJani Nikula return HANGCHECK_HUNG; 19186274f212SChris Wilson case 1: 1919a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 1920a24a11e6SChris Wilson ring->name); 1921a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 1922f2f4d82fSJani Nikula return HANGCHECK_KICK; 19236274f212SChris Wilson case 0: 1924f2f4d82fSJani Nikula return HANGCHECK_WAIT; 19256274f212SChris Wilson } 19269107e9d2SChris Wilson } 19279107e9d2SChris Wilson 1928f2f4d82fSJani Nikula return HANGCHECK_HUNG; 1929a24a11e6SChris Wilson } 1930d1e61e7fSChris Wilson 1931f65d9421SBen Gamari /** 1932f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 193305407ff8SMika Kuoppala * batchbuffers in a long time. We keep track of per-ring seqno progress and 193405407ff8SMika Kuoppala * if there is no progress, the hangcheck score for that ring is increased. 193505407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. If it is, 193605407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 193705407ff8SMika Kuoppala * we assume the chip is wedged and try to fix it by resetting the chip. 1938f65d9421SBen Gamari */ 1939a658b5d2SDamien Lespiau static void i915_hangcheck_elapsed(unsigned long data) 1940f65d9421SBen Gamari { 1941f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 1942f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 1943b4519513SChris Wilson struct intel_ring_buffer *ring; 1944b4519513SChris Wilson int i; 194505407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 19469107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 19479107e9d2SChris Wilson #define BUSY 1 19489107e9d2SChris Wilson #define KICK 5 19499107e9d2SChris Wilson #define HUNG 20 19509107e9d2SChris Wilson #define FIRE 30 1951893eead0SChris Wilson 19523e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 19533e0dc6b0SBen Widawsky return; 19543e0dc6b0SBen Widawsky 1955b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 195605407ff8SMika Kuoppala u32 seqno, acthd; 19579107e9d2SChris Wilson bool busy = true; 1958b4519513SChris Wilson 19596274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 19606274f212SChris Wilson 196105407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 196205407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 196305407ff8SMika Kuoppala 196405407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 19659107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 19669107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 19679107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. */ 19689107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed...
%s idle\n", 19699107e9d2SChris Wilson ring->name); 19709107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 19719107e9d2SChris Wilson ring->hangcheck.score += HUNG; 19729107e9d2SChris Wilson } else 19739107e9d2SChris Wilson busy = false; 197405407ff8SMika Kuoppala } else { 19756274f212SChris Wilson /* We always increment the hangcheck score 19766274f212SChris Wilson * if the ring is busy and still processing 19776274f212SChris Wilson * the same request, so that no single request 19786274f212SChris Wilson * can run indefinitely (such as a chain of 19796274f212SChris Wilson * batches). The only time we do not increment 19806274f212SChris Wilson * the hangcheck score on this ring, if this 19816274f212SChris Wilson * ring is in a legitimate wait for another 19826274f212SChris Wilson * ring. In that case the waiting ring is a 19836274f212SChris Wilson * victim and we want to be sure we catch the 19846274f212SChris Wilson * right culprit. Then every time we do kick 19856274f212SChris Wilson * the ring, add a small increment to the 19866274f212SChris Wilson * score so that we can catch a batch that is 19876274f212SChris Wilson * being repeatedly kicked and so responsible 19886274f212SChris Wilson * for stalling the machine. 19899107e9d2SChris Wilson */ 1990ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 1991ad8beaeaSMika Kuoppala acthd); 1992ad8beaeaSMika Kuoppala 1993ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 1994f2f4d82fSJani Nikula case HANGCHECK_WAIT: 19956274f212SChris Wilson break; 1996f2f4d82fSJani Nikula case HANGCHECK_ACTIVE: 1997ea04cb31SJani Nikula ring->hangcheck.score += BUSY; 19986274f212SChris Wilson break; 1999f2f4d82fSJani Nikula case HANGCHECK_KICK: 2000ea04cb31SJani Nikula ring->hangcheck.score += KICK; 20016274f212SChris Wilson break; 2002f2f4d82fSJani Nikula case HANGCHECK_HUNG: 2003ea04cb31SJani Nikula ring->hangcheck.score += HUNG; 20046274f212SChris Wilson stuck[i] = true; 20056274f212SChris Wilson break; 20066274f212SChris Wilson } 200705407ff8SMika Kuoppala } 20089107e9d2SChris Wilson } else { 20099107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 20109107e9d2SChris Wilson * attempts across multiple batches. 20119107e9d2SChris Wilson */ 20129107e9d2SChris Wilson if (ring->hangcheck.score > 0) 20139107e9d2SChris Wilson ring->hangcheck.score--; 2014cbb465e7SChris Wilson } 2015f65d9421SBen Gamari 201605407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 201705407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 20189107e9d2SChris Wilson busy_count += busy; 201905407ff8SMika Kuoppala } 202005407ff8SMika Kuoppala 202105407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 20229107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 2023acd78c11SBen Widawsky DRM_ERROR("%s on %s\n", 202405407ff8SMika Kuoppala stuck[i] ? 
"stuck" : "no progress", 2025a43adf07SChris Wilson ring->name); 2026a43adf07SChris Wilson rings_hung++; 202705407ff8SMika Kuoppala } 202805407ff8SMika Kuoppala } 202905407ff8SMika Kuoppala 203005407ff8SMika Kuoppala if (rings_hung) 203105407ff8SMika Kuoppala return i915_handle_error(dev, true); 203205407ff8SMika Kuoppala 203305407ff8SMika Kuoppala if (busy_count) 203405407ff8SMika Kuoppala /* Reset timer case chip hangs without another request 203505407ff8SMika Kuoppala * being added */ 203610cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 203710cd45b6SMika Kuoppala } 203810cd45b6SMika Kuoppala 203910cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev) 204010cd45b6SMika Kuoppala { 204110cd45b6SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 204210cd45b6SMika Kuoppala if (!i915_enable_hangcheck) 204310cd45b6SMika Kuoppala return; 204410cd45b6SMika Kuoppala 204599584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 204610cd45b6SMika Kuoppala round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2047f65d9421SBen Gamari } 2048f65d9421SBen Gamari 204991738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 205091738a95SPaulo Zanoni { 205191738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 205291738a95SPaulo Zanoni 205391738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 205491738a95SPaulo Zanoni return; 205591738a95SPaulo Zanoni 205691738a95SPaulo Zanoni /* south display irq */ 205791738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 205891738a95SPaulo Zanoni /* 205991738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 206091738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 206191738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 206291738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR. 
206391738a95SPaulo Zanoni */ 206491738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 206591738a95SPaulo Zanoni POSTING_READ(SDEIER); 206691738a95SPaulo Zanoni } 206791738a95SPaulo Zanoni 2068d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev) 2069d18ea1b5SDaniel Vetter { 2070d18ea1b5SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 2071d18ea1b5SDaniel Vetter 2072d18ea1b5SDaniel Vetter /* and GT */ 2073d18ea1b5SDaniel Vetter I915_WRITE(GTIMR, 0xffffffff); 2074d18ea1b5SDaniel Vetter I915_WRITE(GTIER, 0x0); 2075d18ea1b5SDaniel Vetter POSTING_READ(GTIER); 2076d18ea1b5SDaniel Vetter 2077d18ea1b5SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 2078d18ea1b5SDaniel Vetter /* and PM */ 2079d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 2080d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIER, 0x0); 2081d18ea1b5SDaniel Vetter POSTING_READ(GEN6_PMIER); 2082d18ea1b5SDaniel Vetter } 2083d18ea1b5SDaniel Vetter } 2084d18ea1b5SDaniel Vetter 2085c0e09200SDave Airlie /* drm_dma.h hooks 2086c0e09200SDave Airlie */ 2087f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2088036a4a7dSZhenyu Wang { 2089036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2090036a4a7dSZhenyu Wang 20914697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 20924697995bSJesse Barnes 2093036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2094bdfcdb63SDaniel Vetter 2095036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2096036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 20973143a2bfSChris Wilson POSTING_READ(DEIER); 2098036a4a7dSZhenyu Wang 2099d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 2100c650156aSZhenyu Wang 210191738a95SPaulo Zanoni ibx_irq_preinstall(dev); 21027d99163dSBen Widawsky } 21037d99163dSBen Widawsky 21047e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 21057e231dbeSJesse Barnes { 21067e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 21077e231dbeSJesse Barnes int pipe; 21087e231dbeSJesse Barnes 21097e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 21107e231dbeSJesse Barnes 21117e231dbeSJesse Barnes /* VLV magic */ 21127e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 21137e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 21147e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 21157e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 21167e231dbeSJesse Barnes 21177e231dbeSJesse Barnes /* and GT */ 21187e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 21197e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2120d18ea1b5SDaniel Vetter 2121d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 21227e231dbeSJesse Barnes 21237e231dbeSJesse Barnes I915_WRITE(DPINVGTT, 0xff); 21247e231dbeSJesse Barnes 21257e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 21267e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 21277e231dbeSJesse Barnes for_each_pipe(pipe) 21287e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 21297e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 21307e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 21317e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 21327e231dbeSJesse Barnes POSTING_READ(VLV_IER); 21337e231dbeSJesse Barnes } 21347e231dbeSJesse Barnes 213582a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 213682a28bcfSDaniel Vetter { 213782a28bcfSDaniel Vetter 
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 213882a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 213982a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2140fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 214182a28bcfSDaniel Vetter 214282a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2143fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 214482a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2145cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2146fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 214782a28bcfSDaniel Vetter } else { 2148fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 214982a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2150cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2151fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 215282a28bcfSDaniel Vetter } 215382a28bcfSDaniel Vetter 2154fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 215582a28bcfSDaniel Vetter 21567fe0b973SKeith Packard /* 21577fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 21587fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 21597fe0b973SKeith Packard * 21607fe0b973SKeith Packard * This register is the same on all known PCH chips. 21617fe0b973SKeith Packard */ 21627fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 21637fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 21647fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 21657fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 21667fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 21677fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 21687fe0b973SKeith Packard } 21697fe0b973SKeith Packard 2170d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2171d46da437SPaulo Zanoni { 2172d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 217382a28bcfSDaniel Vetter u32 mask; 2174d46da437SPaulo Zanoni 2175692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2176692a04cfSDaniel Vetter return; 2177692a04cfSDaniel Vetter 21788664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 21798664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2180de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 21818664281bSPaulo Zanoni } else { 21828664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 21838664281bSPaulo Zanoni 21848664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 21858664281bSPaulo Zanoni } 2186ab5c608bSBen Widawsky 2187d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2188d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2189d46da437SPaulo Zanoni } 2190d46da437SPaulo Zanoni 21910a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 21920a9a8c91SDaniel Vetter { 21930a9a8c91SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 21940a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 21950a9a8c91SDaniel Vetter 21960a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 21970a9a8c91SDaniel Vetter 
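	/*
	 * Note on the programming sequence below: bits set in GTIMR (and
	 * GEN6_PMIMR) mask the corresponding interrupt, while GTIER/GEN6_PMIER
	 * select which sources are enabled at all. Stale IIR bits are cleared
	 * first by writing the read value back, then the cached mask and the
	 * enable bits are programmed, and a posting read flushes the writes.
	 */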
21980a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 21990a9a8c91SDaniel Vetter if (HAS_L3_GPU_CACHE(dev)) { 22000a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. */ 22010a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 22020a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 22030a9a8c91SDaniel Vetter } 22040a9a8c91SDaniel Vetter 22050a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 22060a9a8c91SDaniel Vetter if (IS_GEN5(dev)) { 22070a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 22080a9a8c91SDaniel Vetter ILK_BSD_USER_INTERRUPT; 22090a9a8c91SDaniel Vetter } else { 22100a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 22110a9a8c91SDaniel Vetter } 22120a9a8c91SDaniel Vetter 22130a9a8c91SDaniel Vetter I915_WRITE(GTIIR, I915_READ(GTIIR)); 22140a9a8c91SDaniel Vetter I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 22150a9a8c91SDaniel Vetter I915_WRITE(GTIER, gt_irqs); 22160a9a8c91SDaniel Vetter POSTING_READ(GTIER); 22170a9a8c91SDaniel Vetter 22180a9a8c91SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 22190a9a8c91SDaniel Vetter pm_irqs |= GEN6_PM_RPS_EVENTS; 22200a9a8c91SDaniel Vetter 22210a9a8c91SDaniel Vetter if (HAS_VEBOX(dev)) 22220a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 22230a9a8c91SDaniel Vetter 2224605cd25bSPaulo Zanoni dev_priv->pm_irq_mask = 0xffffffff; 22250a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2226605cd25bSPaulo Zanoni I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 22270a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIER, pm_irqs); 22280a9a8c91SDaniel Vetter POSTING_READ(GEN6_PMIER); 22290a9a8c91SDaniel Vetter } 22300a9a8c91SDaniel Vetter } 22310a9a8c91SDaniel Vetter 2232f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2233036a4a7dSZhenyu Wang { 22344bc9d430SDaniel Vetter unsigned long irqflags; 2235036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22368e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 22378e76f8dcSPaulo Zanoni 22388e76f8dcSPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) { 22398e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 22408e76f8dcSPaulo Zanoni DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 22418e76f8dcSPaulo Zanoni DE_PLANEB_FLIP_DONE_IVB | 22428e76f8dcSPaulo Zanoni DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 22438e76f8dcSPaulo Zanoni DE_ERR_INT_IVB); 22448e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 22458e76f8dcSPaulo Zanoni DE_PIPEA_VBLANK_IVB); 22468e76f8dcSPaulo Zanoni 22478e76f8dcSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 22488e76f8dcSPaulo Zanoni } else { 22498e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2250ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 22518664281bSPaulo Zanoni DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 22528e76f8dcSPaulo Zanoni DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 22538e76f8dcSPaulo Zanoni extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 22548e76f8dcSPaulo Zanoni } 2255036a4a7dSZhenyu Wang 22561ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2257036a4a7dSZhenyu Wang 2258036a4a7dSZhenyu Wang /* should always can generate irq */ 2259036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 22601ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 22618e76f8dcSPaulo Zanoni I915_WRITE(DEIER, display_mask | extra_mask); 
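	/*
	 * Note the split above: display_mask is unmasked in DEIMR straight
	 * away, while the extra_mask bits (vblank and, on Ironlake, the PCU
	 * event) are only enabled in DEIER and stay masked in DEIMR until
	 * they are switched on via ironlake_enable_display_irq().
	 */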
22623143a2bfSChris Wilson POSTING_READ(DEIER); 2263036a4a7dSZhenyu Wang 22640a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 2265036a4a7dSZhenyu Wang 2266d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 22677fe0b973SKeith Packard 2268f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 22696005ce42SDaniel Vetter /* Enable PCU event interrupts 22706005ce42SDaniel Vetter * 22716005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 22724bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 22734bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 22744bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2275f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 22764bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2277f97108d1SJesse Barnes } 2278f97108d1SJesse Barnes 2279036a4a7dSZhenyu Wang return 0; 2280036a4a7dSZhenyu Wang } 2281036a4a7dSZhenyu Wang 22827e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 22837e231dbeSJesse Barnes { 22847e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22857e231dbeSJesse Barnes u32 enable_mask; 228631acc7f5SJesse Barnes u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2287b79480baSDaniel Vetter unsigned long irqflags; 22887e231dbeSJesse Barnes 22897e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 229031acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 229131acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 229231acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 22937e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22947e231dbeSJesse Barnes 229531acc7f5SJesse Barnes /* 229631acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 229731acc7f5SJesse Barnes * toggle them based on usage. 229831acc7f5SJesse Barnes */ 229931acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 230031acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 230131acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 23027e231dbeSJesse Barnes 230320afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 230420afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 230520afbda2SDaniel Vetter 23067e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 23077e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 23087e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23097e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 23107e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 23117e231dbeSJesse Barnes POSTING_READ(VLV_IER); 23127e231dbeSJesse Barnes 2313b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2314b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
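	 * (i915_enable_pipestat() asserts that irq_lock is held, hence the
	 * lock/unlock pair even though nothing can race with postinstall.)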
*/ 2315b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 231631acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2317515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 231831acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2319b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 232031acc7f5SJesse Barnes 23217e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23227e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23237e231dbeSJesse Barnes 23240a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 23257e231dbeSJesse Barnes 23267e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 23277e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 23287e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 23297e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 23307e231dbeSJesse Barnes #endif 23317e231dbeSJesse Barnes 23327e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 233320afbda2SDaniel Vetter 233420afbda2SDaniel Vetter return 0; 233520afbda2SDaniel Vetter } 233620afbda2SDaniel Vetter 23377e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 23387e231dbeSJesse Barnes { 23397e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 23407e231dbeSJesse Barnes int pipe; 23417e231dbeSJesse Barnes 23427e231dbeSJesse Barnes if (!dev_priv) 23437e231dbeSJesse Barnes return; 23447e231dbeSJesse Barnes 2345ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2346ac4c16c5SEgbert Eich 23477e231dbeSJesse Barnes for_each_pipe(pipe) 23487e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 23497e231dbeSJesse Barnes 23507e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 23517e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 23527e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 23537e231dbeSJesse Barnes for_each_pipe(pipe) 23547e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 23557e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 23567e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 23577e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 23587e231dbeSJesse Barnes POSTING_READ(VLV_IER); 23597e231dbeSJesse Barnes } 23607e231dbeSJesse Barnes 2361f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2362036a4a7dSZhenyu Wang { 2363036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 23644697995bSJesse Barnes 23654697995bSJesse Barnes if (!dev_priv) 23664697995bSJesse Barnes return; 23674697995bSJesse Barnes 2368ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2369ac4c16c5SEgbert Eich 2370036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2371036a4a7dSZhenyu Wang 2372036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2373036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2374036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 23758664281bSPaulo Zanoni if (IS_GEN7(dev)) 23768664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2377036a4a7dSZhenyu Wang 2378036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2379036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2380036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2381192aac1fSKeith Packard 2382ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2383ab5c608bSBen Widawsky return; 2384ab5c608bSBen Widawsky 
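	/*
	 * South display engine teardown: mask and disable everything in
	 * SDEIMR/SDEIER, ack whatever is still pending in SDEIIR, and on
	 * CPT/LPT also clear the south error interrupt register.
	 */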
2385192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2386192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2387192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 23888664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 23898664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2390036a4a7dSZhenyu Wang } 2391036a4a7dSZhenyu Wang 2392c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2393c2798b19SChris Wilson { 2394c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2395c2798b19SChris Wilson int pipe; 2396c2798b19SChris Wilson 2397c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2398c2798b19SChris Wilson 2399c2798b19SChris Wilson for_each_pipe(pipe) 2400c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2401c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2402c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2403c2798b19SChris Wilson POSTING_READ16(IER); 2404c2798b19SChris Wilson } 2405c2798b19SChris Wilson 2406c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 2407c2798b19SChris Wilson { 2408c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2409c2798b19SChris Wilson 2410c2798b19SChris Wilson I915_WRITE16(EMR, 2411c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2412c2798b19SChris Wilson 2413c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2414c2798b19SChris Wilson dev_priv->irq_mask = 2415c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2416c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2417c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2418c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2419c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2420c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 2421c2798b19SChris Wilson 2422c2798b19SChris Wilson I915_WRITE16(IER, 2423c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2424c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2425c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2426c2798b19SChris Wilson I915_USER_INTERRUPT); 2427c2798b19SChris Wilson POSTING_READ16(IER); 2428c2798b19SChris Wilson 2429c2798b19SChris Wilson return 0; 2430c2798b19SChris Wilson } 2431c2798b19SChris Wilson 243290a72f87SVille Syrjälä /* 243390a72f87SVille Syrjälä * Returns true when a page flip has completed. 243490a72f87SVille Syrjälä */ 243590a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 243690a72f87SVille Syrjälä int pipe, u16 iir) 243790a72f87SVille Syrjälä { 243890a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 243990a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 244090a72f87SVille Syrjälä 244190a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 244290a72f87SVille Syrjälä return false; 244390a72f87SVille Syrjälä 244490a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 244590a72f87SVille Syrjälä return false; 244690a72f87SVille Syrjälä 244790a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 244890a72f87SVille Syrjälä 244990a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 245090a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 245190a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 245290a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 245390a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 245490a72f87SVille Syrjälä */ 245590a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 245690a72f87SVille Syrjälä return false; 245790a72f87SVille Syrjälä 245890a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 245990a72f87SVille Syrjälä 246090a72f87SVille Syrjälä return true; 246190a72f87SVille Syrjälä } 246290a72f87SVille Syrjälä 2463ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2464c2798b19SChris Wilson { 2465c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2466c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2467c2798b19SChris Wilson u16 iir, new_iir; 2468c2798b19SChris Wilson u32 pipe_stats[2]; 2469c2798b19SChris Wilson unsigned long irqflags; 2470c2798b19SChris Wilson int pipe; 2471c2798b19SChris Wilson u16 flip_mask = 2472c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2473c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2474c2798b19SChris Wilson 2475c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 2476c2798b19SChris Wilson 2477c2798b19SChris Wilson iir = I915_READ16(IIR); 2478c2798b19SChris Wilson if (iir == 0) 2479c2798b19SChris Wilson return IRQ_NONE; 2480c2798b19SChris Wilson 2481c2798b19SChris Wilson while (iir & ~flip_mask) { 2482c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2483c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 2484c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 2485c2798b19SChris Wilson * interrupts (for non-MSI). 
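	 * PIPESTAT is therefore read and cleared under irq_lock first, and
	 * IIR is only acked afterwards, so a pipe event that sneaks in in
	 * between still shows up in the re-read of IIR and is handled on the
	 * next pass of the loop.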
2486c2798b19SChris Wilson */ 2487c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2488c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2489c2798b19SChris Wilson i915_handle_error(dev, false); 2490c2798b19SChris Wilson 2491c2798b19SChris Wilson for_each_pipe(pipe) { 2492c2798b19SChris Wilson int reg = PIPESTAT(pipe); 2493c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2494c2798b19SChris Wilson 2495c2798b19SChris Wilson /* 2496c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 2497c2798b19SChris Wilson */ 2498c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2499c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2500c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2501c2798b19SChris Wilson pipe_name(pipe)); 2502c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2503c2798b19SChris Wilson } 2504c2798b19SChris Wilson } 2505c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2506c2798b19SChris Wilson 2507c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 2508c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 2509c2798b19SChris Wilson 2510d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 2511c2798b19SChris Wilson 2512c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 2513c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2514c2798b19SChris Wilson 2515c2798b19SChris Wilson if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 251690a72f87SVille Syrjälä i8xx_handle_vblank(dev, 0, iir)) 251790a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2518c2798b19SChris Wilson 2519c2798b19SChris Wilson if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 252090a72f87SVille Syrjälä i8xx_handle_vblank(dev, 1, iir)) 252190a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2522c2798b19SChris Wilson 2523c2798b19SChris Wilson iir = new_iir; 2524c2798b19SChris Wilson } 2525c2798b19SChris Wilson 2526c2798b19SChris Wilson return IRQ_HANDLED; 2527c2798b19SChris Wilson } 2528c2798b19SChris Wilson 2529c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 2530c2798b19SChris Wilson { 2531c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2532c2798b19SChris Wilson int pipe; 2533c2798b19SChris Wilson 2534c2798b19SChris Wilson for_each_pipe(pipe) { 2535c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 2536c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2537c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2538c2798b19SChris Wilson } 2539c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2540c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2541c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 2542c2798b19SChris Wilson } 2543c2798b19SChris Wilson 2544a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 2545a266c7d5SChris Wilson { 2546a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2547a266c7d5SChris Wilson int pipe; 2548a266c7d5SChris Wilson 2549a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2550a266c7d5SChris Wilson 2551a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2552a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2553a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2554a266c7d5SChris Wilson } 2555a266c7d5SChris Wilson 255600d98ebdSChris Wilson 
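	/* Preinstall runs before the IRQ handler is hooked up, so just
	 * silence everything: HWSTAM, the per-pipe status registers, IMR
	 * and IER. */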
I915_WRITE16(HWSTAM, 0xeffe); 2557a266c7d5SChris Wilson for_each_pipe(pipe) 2558a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2559a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2560a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2561a266c7d5SChris Wilson POSTING_READ(IER); 2562a266c7d5SChris Wilson } 2563a266c7d5SChris Wilson 2564a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 2565a266c7d5SChris Wilson { 2566a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 256738bde180SChris Wilson u32 enable_mask; 2568a266c7d5SChris Wilson 256938bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 257038bde180SChris Wilson 257138bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 257238bde180SChris Wilson dev_priv->irq_mask = 257338bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 257438bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 257538bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 257638bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 257738bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 257838bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 257938bde180SChris Wilson 258038bde180SChris Wilson enable_mask = 258138bde180SChris Wilson I915_ASLE_INTERRUPT | 258238bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 258338bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 258438bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 258538bde180SChris Wilson I915_USER_INTERRUPT; 258638bde180SChris Wilson 2587a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 258820afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 258920afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 259020afbda2SDaniel Vetter 2591a266c7d5SChris Wilson /* Enable in IER... */ 2592a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2593a266c7d5SChris Wilson /* and unmask in IMR */ 2594a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2595a266c7d5SChris Wilson } 2596a266c7d5SChris Wilson 2597a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2598a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2599a266c7d5SChris Wilson POSTING_READ(IER); 2600a266c7d5SChris Wilson 2601f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 260220afbda2SDaniel Vetter 260320afbda2SDaniel Vetter return 0; 260420afbda2SDaniel Vetter } 260520afbda2SDaniel Vetter 260690a72f87SVille Syrjälä /* 260790a72f87SVille Syrjälä * Returns true when a page flip has completed. 260890a72f87SVille Syrjälä */ 260990a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 261090a72f87SVille Syrjälä int plane, int pipe, u32 iir) 261190a72f87SVille Syrjälä { 261290a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 261390a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 261490a72f87SVille Syrjälä 261590a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 261690a72f87SVille Syrjälä return false; 261790a72f87SVille Syrjälä 261890a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 261990a72f87SVille Syrjälä return false; 262090a72f87SVille Syrjälä 262190a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 262290a72f87SVille Syrjälä 262390a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 262490a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 262590a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 262690a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 262790a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 262890a72f87SVille Syrjälä */ 262990a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 263090a72f87SVille Syrjälä return false; 263190a72f87SVille Syrjälä 263290a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 263390a72f87SVille Syrjälä 263490a72f87SVille Syrjälä return true; 263590a72f87SVille Syrjälä } 263690a72f87SVille Syrjälä 2637ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 2638a266c7d5SChris Wilson { 2639a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2640a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26418291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2642a266c7d5SChris Wilson unsigned long irqflags; 264338bde180SChris Wilson u32 flip_mask = 264438bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 264538bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 264638bde180SChris Wilson int pipe, ret = IRQ_NONE; 2647a266c7d5SChris Wilson 2648a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2649a266c7d5SChris Wilson 2650a266c7d5SChris Wilson iir = I915_READ(IIR); 265138bde180SChris Wilson do { 265238bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 26538291ee90SChris Wilson bool blc_event = false; 2654a266c7d5SChris Wilson 2655a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2656a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2657a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2658a266c7d5SChris Wilson * interrupts (for non-MSI). 2659a266c7d5SChris Wilson */ 2660a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2661a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2662a266c7d5SChris Wilson i915_handle_error(dev, false); 2663a266c7d5SChris Wilson 2664a266c7d5SChris Wilson for_each_pipe(pipe) { 2665a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2666a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2667a266c7d5SChris Wilson 266838bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 2669a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2670a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2671a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2672a266c7d5SChris Wilson pipe_name(pipe)); 2673a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 267438bde180SChris Wilson irq_received = true; 2675a266c7d5SChris Wilson } 2676a266c7d5SChris Wilson } 2677a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2678a266c7d5SChris Wilson 2679a266c7d5SChris Wilson if (!irq_received) 2680a266c7d5SChris Wilson break; 2681a266c7d5SChris Wilson 2682a266c7d5SChris Wilson /* Consume port. 
Then clear IIR or we'll miss events */ 2683a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 2684a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2685a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2686b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2687a266c7d5SChris Wilson 2688a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2689a266c7d5SChris Wilson hotplug_status); 269091d131d2SDaniel Vetter 269110a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 269291d131d2SDaniel Vetter 2693a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 269438bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 2695a266c7d5SChris Wilson } 2696a266c7d5SChris Wilson 269738bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 2698a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 2699a266c7d5SChris Wilson 2700a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 2701a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2702a266c7d5SChris Wilson 2703a266c7d5SChris Wilson for_each_pipe(pipe) { 270438bde180SChris Wilson int plane = pipe; 270538bde180SChris Wilson if (IS_MOBILE(dev)) 270638bde180SChris Wilson plane = !plane; 27075e2032d4SVille Syrjälä 270890a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 270990a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 271090a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2711a266c7d5SChris Wilson 2712a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2713a266c7d5SChris Wilson blc_event = true; 2714a266c7d5SChris Wilson } 2715a266c7d5SChris Wilson 2716a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2717a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 2718a266c7d5SChris Wilson 2719a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 2720a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 2721a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 2722a266c7d5SChris Wilson * we would never get another interrupt. 2723a266c7d5SChris Wilson * 2724a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 2725a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 2726a266c7d5SChris Wilson * another one. 2727a266c7d5SChris Wilson * 2728a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 2729a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 2730a266c7d5SChris Wilson * the posting read. This should be rare enough to never 2731a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 2732a266c7d5SChris Wilson * stray interrupts. 
2733a266c7d5SChris Wilson */ 273438bde180SChris Wilson ret = IRQ_HANDLED; 2735a266c7d5SChris Wilson iir = new_iir; 273638bde180SChris Wilson } while (iir & ~flip_mask); 2737a266c7d5SChris Wilson 2738d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 27398291ee90SChris Wilson 2740a266c7d5SChris Wilson return ret; 2741a266c7d5SChris Wilson } 2742a266c7d5SChris Wilson 2743a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 2744a266c7d5SChris Wilson { 2745a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2746a266c7d5SChris Wilson int pipe; 2747a266c7d5SChris Wilson 2748ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2749ac4c16c5SEgbert Eich 2750a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2751a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2752a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2753a266c7d5SChris Wilson } 2754a266c7d5SChris Wilson 275500d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 275655b39755SChris Wilson for_each_pipe(pipe) { 275755b39755SChris Wilson /* Clear enable bits; then clear status bits */ 2758a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 275955b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 276055b39755SChris Wilson } 2761a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2762a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2763a266c7d5SChris Wilson 2764a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 2765a266c7d5SChris Wilson } 2766a266c7d5SChris Wilson 2767a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 2768a266c7d5SChris Wilson { 2769a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2770a266c7d5SChris Wilson int pipe; 2771a266c7d5SChris Wilson 2772a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2773a266c7d5SChris Wilson 2774a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2775a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2776a266c7d5SChris Wilson 2777a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 2778a266c7d5SChris Wilson for_each_pipe(pipe) 2779a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2780a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2781a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2782a266c7d5SChris Wilson POSTING_READ(IER); 2783a266c7d5SChris Wilson } 2784a266c7d5SChris Wilson 2785a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 2786a266c7d5SChris Wilson { 2787a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2788bbba0a97SChris Wilson u32 enable_mask; 2789a266c7d5SChris Wilson u32 error_mask; 2790b79480baSDaniel Vetter unsigned long irqflags; 2791a266c7d5SChris Wilson 2792a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. 
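	 * Plane flip pending bits are deliberately left unmasked in IMR so
	 * they are latched in IIR, but they are dropped from the IER enable
	 * mask below; page flips are completed by watching them (and ISR)
	 * at vblank time rather than by enabling a separate interrupt for
	 * them.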
*/ 2793bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2794adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 2795bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2796bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2797bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2798bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2799bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2800bbba0a97SChris Wilson 2801bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 280221ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 280321ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2804bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 2805bbba0a97SChris Wilson 2806bbba0a97SChris Wilson if (IS_G4X(dev)) 2807bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 2808a266c7d5SChris Wilson 2809b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2810b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. */ 2811b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2812515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2813b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2814a266c7d5SChris Wilson 2815a266c7d5SChris Wilson /* 2816a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 2817a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 2818a266c7d5SChris Wilson */ 2819a266c7d5SChris Wilson if (IS_G4X(dev)) { 2820a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 2821a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 2822a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 2823a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2824a266c7d5SChris Wilson } else { 2825a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 2826a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2827a266c7d5SChris Wilson } 2828a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 2829a266c7d5SChris Wilson 2830a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2831a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2832a266c7d5SChris Wilson POSTING_READ(IER); 2833a266c7d5SChris Wilson 283420afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 283520afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 283620afbda2SDaniel Vetter 2837f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 283820afbda2SDaniel Vetter 283920afbda2SDaniel Vetter return 0; 284020afbda2SDaniel Vetter } 284120afbda2SDaniel Vetter 2842bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 284320afbda2SDaniel Vetter { 284420afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2845e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 2846cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 284720afbda2SDaniel Vetter u32 hotplug_en; 284820afbda2SDaniel Vetter 2849b5ea2d56SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 2850b5ea2d56SDaniel Vetter 2851bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 2852bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2853bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 2854adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 2855e5868a31SEgbert Eich /* enable bits are the same for all generations */ 2856cd569aedSEgbert Eich list_for_each_entry(intel_encoder, 
&mode_config->encoder_list, base.head) 2857cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2858cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 2859a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 2860a266c7d5SChris Wilson to generate a spurious hotplug event about three 2861a266c7d5SChris Wilson seconds later. So just do it once. 2862a266c7d5SChris Wilson */ 2863a266c7d5SChris Wilson if (IS_G4X(dev)) 2864a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 286585fc95baSDaniel Vetter hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 2866a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2867a266c7d5SChris Wilson 2868a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 2869a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2870a266c7d5SChris Wilson } 2871bac56d5bSEgbert Eich } 2872a266c7d5SChris Wilson 2873ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 2874a266c7d5SChris Wilson { 2875a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2876a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2877a266c7d5SChris Wilson u32 iir, new_iir; 2878a266c7d5SChris Wilson u32 pipe_stats[I915_MAX_PIPES]; 2879a266c7d5SChris Wilson unsigned long irqflags; 2880a266c7d5SChris Wilson int irq_received; 2881a266c7d5SChris Wilson int ret = IRQ_NONE, pipe; 288221ad8330SVille Syrjälä u32 flip_mask = 288321ad8330SVille Syrjälä I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 288421ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2885a266c7d5SChris Wilson 2886a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2887a266c7d5SChris Wilson 2888a266c7d5SChris Wilson iir = I915_READ(IIR); 2889a266c7d5SChris Wilson 2890a266c7d5SChris Wilson for (;;) { 28912c8ba29fSChris Wilson bool blc_event = false; 28922c8ba29fSChris Wilson 289321ad8330SVille Syrjälä irq_received = (iir & ~flip_mask) != 0; 2894a266c7d5SChris Wilson 2895a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2896a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2897a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2898a266c7d5SChris Wilson * interrupts (for non-MSI). 
2899a266c7d5SChris Wilson */ 2900a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2901a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2902a266c7d5SChris Wilson i915_handle_error(dev, false); 2903a266c7d5SChris Wilson 2904a266c7d5SChris Wilson for_each_pipe(pipe) { 2905a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2906a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2907a266c7d5SChris Wilson 2908a266c7d5SChris Wilson /* 2909a266c7d5SChris Wilson * Clear the PIPE*STAT regs before the IIR 2910a266c7d5SChris Wilson */ 2911a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2912a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2913a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2914a266c7d5SChris Wilson pipe_name(pipe)); 2915a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2916a266c7d5SChris Wilson irq_received = 1; 2917a266c7d5SChris Wilson } 2918a266c7d5SChris Wilson } 2919a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2920a266c7d5SChris Wilson 2921a266c7d5SChris Wilson if (!irq_received) 2922a266c7d5SChris Wilson break; 2923a266c7d5SChris Wilson 2924a266c7d5SChris Wilson ret = IRQ_HANDLED; 2925a266c7d5SChris Wilson 2926a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 2927adca4730SChris Wilson if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2928a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2929b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 2930b543fb04SEgbert Eich HOTPLUG_INT_STATUS_G4X : 29314f7fd709SDaniel Vetter HOTPLUG_INT_STATUS_I915); 2932a266c7d5SChris Wilson 2933a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2934a266c7d5SChris Wilson hotplug_status); 293591d131d2SDaniel Vetter 293610a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, 293710a504deSDaniel Vetter IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915); 293891d131d2SDaniel Vetter 2939a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2940a266c7d5SChris Wilson I915_READ(PORT_HOTPLUG_STAT); 2941a266c7d5SChris Wilson } 2942a266c7d5SChris Wilson 294321ad8330SVille Syrjälä I915_WRITE(IIR, iir & ~flip_mask); 2944a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 2945a266c7d5SChris Wilson 2946a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 2947a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2948a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 2949a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[VCS]); 2950a266c7d5SChris Wilson 2951a266c7d5SChris Wilson for_each_pipe(pipe) { 29522c8ba29fSChris Wilson if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 295390a72f87SVille Syrjälä i915_handle_vblank(dev, pipe, pipe, iir)) 295490a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 2955a266c7d5SChris Wilson 2956a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2957a266c7d5SChris Wilson blc_event = true; 2958a266c7d5SChris Wilson } 2959a266c7d5SChris Wilson 2960a266c7d5SChris Wilson 2961a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2962a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 2963a266c7d5SChris Wilson 2964515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2965515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 2966515ac2bbSDaniel Vetter 2967a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 2968a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 2969a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 2970a266c7d5SChris Wilson * we would never get another interrupt. 2971a266c7d5SChris Wilson * 2972a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 2973a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 2974a266c7d5SChris Wilson * another one. 2975a266c7d5SChris Wilson * 2976a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 2977a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 2978a266c7d5SChris Wilson * the posting read. This should be rare enough to never 2979a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 2980a266c7d5SChris Wilson * stray interrupts. 
2981a266c7d5SChris Wilson */ 2982a266c7d5SChris Wilson iir = new_iir; 2983a266c7d5SChris Wilson } 2984a266c7d5SChris Wilson 2985d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 29862c8ba29fSChris Wilson 2987a266c7d5SChris Wilson return ret; 2988a266c7d5SChris Wilson } 2989a266c7d5SChris Wilson 2990a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev) 2991a266c7d5SChris Wilson { 2992a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2993a266c7d5SChris Wilson int pipe; 2994a266c7d5SChris Wilson 2995a266c7d5SChris Wilson if (!dev_priv) 2996a266c7d5SChris Wilson return; 2997a266c7d5SChris Wilson 2998ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2999ac4c16c5SEgbert Eich 3000a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3001a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3002a266c7d5SChris Wilson 3003a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xffffffff); 3004a266c7d5SChris Wilson for_each_pipe(pipe) 3005a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3006a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3007a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3008a266c7d5SChris Wilson 3009a266c7d5SChris Wilson for_each_pipe(pipe) 3010a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 3011a266c7d5SChris Wilson I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3012a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3013a266c7d5SChris Wilson } 3014a266c7d5SChris Wilson 3015ac4c16c5SEgbert Eich static void i915_reenable_hotplug_timer_func(unsigned long data) 3016ac4c16c5SEgbert Eich { 3017ac4c16c5SEgbert Eich drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3018ac4c16c5SEgbert Eich struct drm_device *dev = dev_priv->dev; 3019ac4c16c5SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3020ac4c16c5SEgbert Eich unsigned long irqflags; 3021ac4c16c5SEgbert Eich int i; 3022ac4c16c5SEgbert Eich 3023ac4c16c5SEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3024ac4c16c5SEgbert Eich for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3025ac4c16c5SEgbert Eich struct drm_connector *connector; 3026ac4c16c5SEgbert Eich 3027ac4c16c5SEgbert Eich if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3028ac4c16c5SEgbert Eich continue; 3029ac4c16c5SEgbert Eich 3030ac4c16c5SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3031ac4c16c5SEgbert Eich 3032ac4c16c5SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3033ac4c16c5SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3034ac4c16c5SEgbert Eich 3035ac4c16c5SEgbert Eich if (intel_connector->encoder->hpd_pin == i) { 3036ac4c16c5SEgbert Eich if (connector->polled != intel_connector->polled) 3037ac4c16c5SEgbert Eich DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3038ac4c16c5SEgbert Eich drm_get_connector_name(connector)); 3039ac4c16c5SEgbert Eich connector->polled = intel_connector->polled; 3040ac4c16c5SEgbert Eich if (!connector->polled) 3041ac4c16c5SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3042ac4c16c5SEgbert Eich } 3043ac4c16c5SEgbert Eich } 3044ac4c16c5SEgbert Eich } 3045ac4c16c5SEgbert Eich if (dev_priv->display.hpd_irq_setup) 3046ac4c16c5SEgbert Eich dev_priv->display.hpd_irq_setup(dev); 3047ac4c16c5SEgbert Eich spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3048ac4c16c5SEgbert Eich } 3049ac4c16c5SEgbert Eich 3050f71d4af4SJesse Barnes void intel_irq_init(struct drm_device *dev) 3051f71d4af4SJesse 
Barnes { 30528b2e326dSChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 30538b2e326dSChris Wilson 30548b2e326dSChris Wilson INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 305599584db3SDaniel Vetter INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3056c6a828d3SDaniel Vetter INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3057a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 30588b2e326dSChris Wilson 305999584db3SDaniel Vetter setup_timer(&dev_priv->gpu_error.hangcheck_timer, 306099584db3SDaniel Vetter i915_hangcheck_elapsed, 306161bac78eSDaniel Vetter (unsigned long) dev); 3062ac4c16c5SEgbert Eich setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3063ac4c16c5SEgbert Eich (unsigned long) dev_priv); 306461bac78eSDaniel Vetter 306597a19a24STomas Janousek pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 30669ee32feaSDaniel Vetter 3067f71d4af4SJesse Barnes dev->driver->get_vblank_counter = i915_get_vblank_counter; 3068f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 30697d4e146fSEugeni Dodonov if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3070f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3071f71d4af4SJesse Barnes dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3072f71d4af4SJesse Barnes } 3073f71d4af4SJesse Barnes 3074c3613de9SKeith Packard if (drm_core_check_feature(dev, DRIVER_MODESET)) 3075f71d4af4SJesse Barnes dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3076c3613de9SKeith Packard else 3077c3613de9SKeith Packard dev->driver->get_vblank_timestamp = NULL; 3078f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3079f71d4af4SJesse Barnes 30807e231dbeSJesse Barnes if (IS_VALLEYVIEW(dev)) { 30817e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 30827e231dbeSJesse Barnes dev->driver->irq_preinstall = valleyview_irq_preinstall; 30837e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 30847e231dbeSJesse Barnes dev->driver->irq_uninstall = valleyview_irq_uninstall; 30857e231dbeSJesse Barnes dev->driver->enable_vblank = valleyview_enable_vblank; 30867e231dbeSJesse Barnes dev->driver->disable_vblank = valleyview_disable_vblank; 3087fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3088f71d4af4SJesse Barnes } else if (HAS_PCH_SPLIT(dev)) { 3089f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 3090f71d4af4SJesse Barnes dev->driver->irq_preinstall = ironlake_irq_preinstall; 3091f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 3092f71d4af4SJesse Barnes dev->driver->irq_uninstall = ironlake_irq_uninstall; 3093f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 3094f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 309582a28bcfSDaniel Vetter dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3096f71d4af4SJesse Barnes } else { 3097c2798b19SChris Wilson if (INTEL_INFO(dev)->gen == 2) { 3098c2798b19SChris Wilson dev->driver->irq_preinstall = i8xx_irq_preinstall; 3099c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 3100c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 3101c2798b19SChris Wilson dev->driver->irq_uninstall = i8xx_irq_uninstall; 3102a266c7d5SChris Wilson } else if (INTEL_INFO(dev)->gen == 3) { 
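			/* gen3 gets the legacy i915_* hooks; gen2 was handled
			 * above with the 16-bit i8xx_* variants, and everything
			 * newer without a PCH falls through to the i965_* set
			 * below. */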
3103a266c7d5SChris Wilson dev->driver->irq_preinstall = i915_irq_preinstall; 3104a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 3105a266c7d5SChris Wilson dev->driver->irq_uninstall = i915_irq_uninstall; 3106a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 310720afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3108c2798b19SChris Wilson } else { 3109a266c7d5SChris Wilson dev->driver->irq_preinstall = i965_irq_preinstall; 3110a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 3111a266c7d5SChris Wilson dev->driver->irq_uninstall = i965_irq_uninstall; 3112a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 3113bac56d5bSEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3114c2798b19SChris Wilson } 3115f71d4af4SJesse Barnes dev->driver->enable_vblank = i915_enable_vblank; 3116f71d4af4SJesse Barnes dev->driver->disable_vblank = i915_disable_vblank; 3117f71d4af4SJesse Barnes } 3118f71d4af4SJesse Barnes } 311920afbda2SDaniel Vetter 312020afbda2SDaniel Vetter void intel_hpd_init(struct drm_device *dev) 312120afbda2SDaniel Vetter { 312220afbda2SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 3123821450c6SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3124821450c6SEgbert Eich struct drm_connector *connector; 3125b5ea2d56SDaniel Vetter unsigned long irqflags; 3126821450c6SEgbert Eich int i; 312720afbda2SDaniel Vetter 3128821450c6SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 3129821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 3130821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3131821450c6SEgbert Eich } 3132821450c6SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3133821450c6SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3134821450c6SEgbert Eich connector->polled = intel_connector->polled; 3135821450c6SEgbert Eich if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3136821450c6SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3137821450c6SEgbert Eich } 3138b5ea2d56SDaniel Vetter 3139b5ea2d56SDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3140b5ea2d56SDaniel Vetter * just to make the assert_spin_locked checks happy. */ 3141b5ea2d56SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 314220afbda2SDaniel Vetter if (dev_priv->display.hpd_irq_setup) 314320afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 3144b5ea2d56SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 314520afbda2SDaniel Vetter } 3146
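/*
 * Rough call order, as wired up above (a sketch of the expected driver
 * load path, not a verbatim quote of it):
 *
 *	intel_irq_init(dev);	// pick the per-platform irq/vblank hooks
 *	drm_irq_install(dev);	// DRM core: irq_preinstall, request_irq()
 *				// with the chosen handler, irq_postinstall
 *	intel_hpd_init(dev);	// seed hpd_stats and arm hotplug detection
 *
 * and drm_irq_uninstall() later lands in the matching *_irq_uninstall
 * hook.
 */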