/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i965[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);

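/*
 * Note: the hpd_* tables above are indexed by enum hpd_pin and translate a
 * pin into the platform-specific hotplug register bit.  An illustrative
 * lookup (a sketch of how the storm-detection code further down consumes
 * these tables, not additional driver logic):
 *
 *	for (i = 1; i < HPD_NUM_PINS; i++)
 *		if (hpd_ibx[i] & hotplug_trigger)
 *			; // hotplug pin i fired on an IBX PCH
 */
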
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

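/*
 * Illustrative call pattern (not taken from this file): since the function
 * returns the previous reporting state, a caller that has to temporarily
 * silence underrun reports can save and restore that state:
 *
 *	bool was_enabled;
 *
 *	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	// ... do something known to cause spurious underruns ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, was_enabled);
 *
 * The PCH variant below follows the same convention.
 */
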
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

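/*
 * Layout note, inferred from the masking in i915_enable_pipestat() above:
 * PIPESTAT keeps the enable bits in the upper half of the register and the
 * matching status bits sixteen positions lower, which is why enabling an
 * event touches both halves:
 *
 *	pipestat |= mask | (mask >> 16);  // set enable bit, ack stale status
 *
 * while the 0x7fff0000 read mask keeps only the enable bits.
 */
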
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

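/*
 * The double read of the high half in i915_get_vblank_counter() above is the
 * usual lock-free way to sample a counter that is spread over two registers.
 * Generic sketch of the technique, with hypothetical read_high()/read_low()
 * accessors:
 *
 *	do {
 *		high1 = read_high();
 *		low   = read_low();
 *		high2 = read_high();
 *	} while (high1 != high2);	// retry if the low half wrapped
 *	counter = (high1 << low_bits) | low;
 */
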
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

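/*
 * Worked example for the pre-gen4 branch of i915_get_crtc_scanoutpos() above
 * (numbers invented): with htotal = 2200 and a pixel count of 11023 since the
 * start of the frame,
 *
 *	*vpos = 11023 / 2200 = 5;
 *	*hpos = 11023 - 5 * 2200 = 23;
 *
 * i.e. the beam is on line 5, pixel 23 of the current frame.
 */
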
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

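/*
 * Note on the clamping in ironlake_handle_rps_change() above: judging by the
 * bounds checks, a numerically smaller delay value is the faster state, so
 * ips.max_delay is the smallest permitted value and ips.min_delay the largest;
 * hence "- 1" steps up performance when busy and "+ 1" steps it down when idle.
 */
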
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check.  It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

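/*
 * gen6_pm_rps_work() above is the bottom half of the usual mask-and-defer
 * scheme used for the RPS interrupt in this file: the interrupt path
 * (gen6_queue_rps_work() below) masks the PM bits in GEN6_PMIMR and stashes
 * the IIR bits in rps.pm_iir, while the work function snapshots and clears
 * rps.pm_iir, unmasks GEN6_PMIMR and only then performs the (sleeping)
 * frequency change under rps.hw_lock.
 */
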
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

/*
 * Count hotplug interrupts per pin; if more than HPD_STORM_THRESHOLD events
 * arrive within HPD_STORM_DETECT_PERIOD msecs, mark the pin as storming so
 * the hotplug code can fall back to polling.  Returns true if any pin was
 * newly marked.
 */
static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			ret = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
(serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 11288664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 11298664281bSPaulo Zanoni false)) 11308664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 11318664281bSPaulo Zanoni 11328664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 11338664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 11348664281bSPaulo Zanoni false)) 11358664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 11368664281bSPaulo Zanoni 11378664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 11388664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 11398664281bSPaulo Zanoni false)) 11408664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 11418664281bSPaulo Zanoni 11428664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1143776ad806SJesse Barnes } 1144776ad806SJesse Barnes 114523e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 114623e81d69SAdam Jackson { 114723e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 114823e81d69SAdam Jackson int pipe; 1149b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 115023e81d69SAdam Jackson 1151b543fb04SEgbert Eich if (hotplug_trigger) { 1152cd569aedSEgbert Eich if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt)) 1153cd569aedSEgbert Eich ibx_hpd_irq_setup(dev); 115476e43830SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->hotplug_work); 1155b543fb04SEgbert Eich } 1156cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1157cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 115823e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1159cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1160cfc33bf7SVille Syrjälä port_name(port)); 1161cfc33bf7SVille Syrjälä } 116223e81d69SAdam Jackson 116323e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1164ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 116523e81d69SAdam Jackson 116623e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1167515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 116823e81d69SAdam Jackson 116923e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 117023e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 117123e81d69SAdam Jackson 117223e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 117323e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 117423e81d69SAdam Jackson 117523e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 117623e81d69SAdam Jackson for_each_pipe(pipe) 117723e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 117823e81d69SAdam Jackson pipe_name(pipe), 117923e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 11808664281bSPaulo Zanoni 11818664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 11828664281bSPaulo Zanoni cpt_serr_int_handler(dev); 118323e81d69SAdam Jackson } 118423e81d69SAdam Jackson 1185ff1f525eSDaniel Vetter static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 1186b1f14ad0SJesse Barnes { 1187b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1188b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1189ab5c608bSBen Widawsky u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; 11900e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 11910e43406bSChris Wilson int i; 1192b1f14ad0SJesse 
Barnes 1193b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1194b1f14ad0SJesse Barnes 11958664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 11968664281bSPaulo Zanoni * do any I915_{READ,WRITE}. */ 11978664281bSPaulo Zanoni if (IS_HASWELL(dev) && 11988664281bSPaulo Zanoni (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 11998664281bSPaulo Zanoni DRM_ERROR("Unclaimed register before interrupt\n"); 12008664281bSPaulo Zanoni I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 12018664281bSPaulo Zanoni } 12028664281bSPaulo Zanoni 1203b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1204b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1205b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 12060e43406bSChris Wilson 120744498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 120844498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 120944498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 121044498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 121144498aeaSPaulo Zanoni * due to its back queue). */ 1212ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 121344498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 121444498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 121544498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1216ab5c608bSBen Widawsky } 121744498aeaSPaulo Zanoni 12188664281bSPaulo Zanoni /* On Haswell, also mask ERR_INT because we don't want to risk 12198664281bSPaulo Zanoni * generating "unclaimed register" interrupts from inside the interrupt 12208664281bSPaulo Zanoni * handler. */ 12218664281bSPaulo Zanoni if (IS_HASWELL(dev)) 12228664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 12238664281bSPaulo Zanoni 12240e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 12250e43406bSChris Wilson if (gt_iir) { 12260e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 12270e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 12280e43406bSChris Wilson ret = IRQ_HANDLED; 12290e43406bSChris Wilson } 1230b1f14ad0SJesse Barnes 1231b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 12320e43406bSChris Wilson if (de_iir) { 12338664281bSPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 12348664281bSPaulo Zanoni ivb_err_int_handler(dev); 12358664281bSPaulo Zanoni 1236ce99c256SDaniel Vetter if (de_iir & DE_AUX_CHANNEL_A_IVB) 1237ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1238ce99c256SDaniel Vetter 1239b1f14ad0SJesse Barnes if (de_iir & DE_GSE_IVB) 124081a07809SJani Nikula intel_opregion_asle_intr(dev); 1241b1f14ad0SJesse Barnes 12420e43406bSChris Wilson for (i = 0; i < 3; i++) { 124374d44445SDaniel Vetter if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 124474d44445SDaniel Vetter drm_handle_vblank(dev, i); 12450e43406bSChris Wilson if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 12460e43406bSChris Wilson intel_prepare_page_flip(dev, i); 12470e43406bSChris Wilson intel_finish_page_flip_plane(dev, i); 1248b1f14ad0SJesse Barnes } 1249b1f14ad0SJesse Barnes } 1250b1f14ad0SJesse Barnes 1251b1f14ad0SJesse Barnes /* check event from PCH */ 1252ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 12530e43406bSChris Wilson u32 pch_iir = I915_READ(SDEIIR); 12540e43406bSChris Wilson 125523e81d69SAdam Jackson cpt_irq_handler(dev, pch_iir); 12560e43406bSChris Wilson 12570e43406bSChris Wilson /* clear PCH hotplug event before clearing
CPU irq */ 12580e43406bSChris Wilson I915_WRITE(SDEIIR, pch_iir); 1259b1f14ad0SJesse Barnes } 1260b1f14ad0SJesse Barnes 12610e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 12620e43406bSChris Wilson ret = IRQ_HANDLED; 12630e43406bSChris Wilson } 12640e43406bSChris Wilson 12650e43406bSChris Wilson pm_iir = I915_READ(GEN6_PMIIR); 12660e43406bSChris Wilson if (pm_iir) { 1267baf02a1fSBen Widawsky if (IS_HASWELL(dev)) 1268baf02a1fSBen Widawsky hsw_pm_irq_handler(dev_priv, pm_iir); 12694848405cSBen Widawsky else if (pm_iir & GEN6_PM_RPS_EVENTS) 1270fc6826d1SChris Wilson gen6_queue_rps_work(dev_priv, pm_iir); 1271b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 12720e43406bSChris Wilson ret = IRQ_HANDLED; 12730e43406bSChris Wilson } 1274b1f14ad0SJesse Barnes 12758664281bSPaulo Zanoni if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev)) 12768664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 12778664281bSPaulo Zanoni 1278b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1279b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1280ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 128144498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 128244498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1283ab5c608bSBen Widawsky } 1284b1f14ad0SJesse Barnes 1285b1f14ad0SJesse Barnes return ret; 1286b1f14ad0SJesse Barnes } 1287b1f14ad0SJesse Barnes 1288e7b4c6b1SDaniel Vetter static void ilk_gt_irq_handler(struct drm_device *dev, 1289e7b4c6b1SDaniel Vetter struct drm_i915_private *dev_priv, 1290e7b4c6b1SDaniel Vetter u32 gt_iir) 1291e7b4c6b1SDaniel Vetter { 1292cc609d5dSBen Widawsky if (gt_iir & 1293cc609d5dSBen Widawsky (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1294e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[RCS]); 1295cc609d5dSBen Widawsky if (gt_iir & ILK_BSD_USER_INTERRUPT) 1296e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[VCS]); 1297e7b4c6b1SDaniel Vetter } 1298e7b4c6b1SDaniel Vetter 1299ff1f525eSDaniel Vetter static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1300036a4a7dSZhenyu Wang { 13014697995bSJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1302036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1303036a4a7dSZhenyu Wang int ret = IRQ_NONE; 130444498aeaSPaulo Zanoni u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; 1305881f47b6SXiang, Haihao 13064697995bSJesse Barnes atomic_inc(&dev_priv->irq_received); 13074697995bSJesse Barnes 13082d109a84SZou, Nanhai /* disable master interrupt before clearing iir */ 13092d109a84SZou, Nanhai de_ier = I915_READ(DEIER); 13102d109a84SZou, Nanhai I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 13113143a2bfSChris Wilson POSTING_READ(DEIER); 13122d109a84SZou, Nanhai 131344498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 131444498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 131544498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 131644498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 131744498aeaSPaulo Zanoni * due to its back queue). 
*/ 131844498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 131944498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 132044498aeaSPaulo Zanoni POSTING_READ(SDEIER); 132144498aeaSPaulo Zanoni 1322036a4a7dSZhenyu Wang de_iir = I915_READ(DEIIR); 1323036a4a7dSZhenyu Wang gt_iir = I915_READ(GTIIR); 13243b8d8d91SJesse Barnes pm_iir = I915_READ(GEN6_PMIIR); 1325036a4a7dSZhenyu Wang 1326acd15b6cSDaniel Vetter if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) 1327c7c85101SZou Nan hai goto done; 1328036a4a7dSZhenyu Wang 1329036a4a7dSZhenyu Wang ret = IRQ_HANDLED; 1330036a4a7dSZhenyu Wang 1331e7b4c6b1SDaniel Vetter if (IS_GEN5(dev)) 1332e7b4c6b1SDaniel Vetter ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1333e7b4c6b1SDaniel Vetter else 1334e7b4c6b1SDaniel Vetter snb_gt_irq_handler(dev, dev_priv, gt_iir); 1335036a4a7dSZhenyu Wang 1336ce99c256SDaniel Vetter if (de_iir & DE_AUX_CHANNEL_A) 1337ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1338ce99c256SDaniel Vetter 133901c66889SZhao Yakui if (de_iir & DE_GSE) 134081a07809SJani Nikula intel_opregion_asle_intr(dev); 134101c66889SZhao Yakui 134274d44445SDaniel Vetter if (de_iir & DE_PIPEA_VBLANK) 134374d44445SDaniel Vetter drm_handle_vblank(dev, 0); 134474d44445SDaniel Vetter 134574d44445SDaniel Vetter if (de_iir & DE_PIPEB_VBLANK) 134674d44445SDaniel Vetter drm_handle_vblank(dev, 1); 134774d44445SDaniel Vetter 1348de032bf4SPaulo Zanoni if (de_iir & DE_POISON) 1349de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1350de032bf4SPaulo Zanoni 13518664281bSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 13528664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 13538664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 13548664281bSPaulo Zanoni 13558664281bSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 13568664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 13578664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 13588664281bSPaulo Zanoni 1359f072d2e7SZhenyu Wang if (de_iir & DE_PLANEA_FLIP_DONE) { 1360013d5aa2SJesse Barnes intel_prepare_page_flip(dev, 0); 13612bbda389SChris Wilson intel_finish_page_flip_plane(dev, 0); 1362013d5aa2SJesse Barnes } 1363013d5aa2SJesse Barnes 1364f072d2e7SZhenyu Wang if (de_iir & DE_PLANEB_FLIP_DONE) { 1365f072d2e7SZhenyu Wang intel_prepare_page_flip(dev, 1); 13662bbda389SChris Wilson intel_finish_page_flip_plane(dev, 1); 1367013d5aa2SJesse Barnes } 1368c062df61SLi Peng 1369c650156aSZhenyu Wang /* check event from PCH */ 1370776ad806SJesse Barnes if (de_iir & DE_PCH_EVENT) { 1371acd15b6cSDaniel Vetter u32 pch_iir = I915_READ(SDEIIR); 1372acd15b6cSDaniel Vetter 137323e81d69SAdam Jackson if (HAS_PCH_CPT(dev)) 137423e81d69SAdam Jackson cpt_irq_handler(dev, pch_iir); 137523e81d69SAdam Jackson else 137623e81d69SAdam Jackson ibx_irq_handler(dev, pch_iir); 1377acd15b6cSDaniel Vetter 1378acd15b6cSDaniel Vetter /* should clear PCH hotplug event before clearing CPU irq */ 1379acd15b6cSDaniel Vetter I915_WRITE(SDEIIR, pch_iir); 1380776ad806SJesse Barnes } 1381c650156aSZhenyu Wang 138273edd18fSDaniel Vetter if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 138373edd18fSDaniel Vetter ironlake_handle_rps_change(dev); 1384f97108d1SJesse Barnes 13854848405cSBen Widawsky if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) 1386fc6826d1SChris Wilson gen6_queue_rps_work(dev_priv, pm_iir); 13873b8d8d91SJesse Barnes 1388c7c85101SZou Nan hai I915_WRITE(GTIIR, gt_iir); 1389c7c85101SZou Nan hai I915_WRITE(DEIIR, de_iir); 13904912d041SBen Widawsky I915_WRITE(GEN6_PMIIR, 
pm_iir); 1391036a4a7dSZhenyu Wang 1392c7c85101SZou Nan hai done: 13932d109a84SZou, Nanhai I915_WRITE(DEIER, de_ier); 13943143a2bfSChris Wilson POSTING_READ(DEIER); 139544498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 139644498aeaSPaulo Zanoni POSTING_READ(SDEIER); 13972d109a84SZou, Nanhai 1398036a4a7dSZhenyu Wang return ret; 1399036a4a7dSZhenyu Wang } 1400036a4a7dSZhenyu Wang 14018a905236SJesse Barnes /** 14028a905236SJesse Barnes * i915_error_work_func - do process context error handling work 14038a905236SJesse Barnes * @work: work struct 14048a905236SJesse Barnes * 14058a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 14068a905236SJesse Barnes * was detected. 14078a905236SJesse Barnes */ 14088a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 14098a905236SJesse Barnes { 14101f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 14111f83fee0SDaniel Vetter work); 14121f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 14131f83fee0SDaniel Vetter gpu_error); 14148a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1415f69061beSDaniel Vetter struct intel_ring_buffer *ring; 1416f316a42cSBen Gamari char *error_event[] = { "ERROR=1", NULL }; 1417f316a42cSBen Gamari char *reset_event[] = { "RESET=1", NULL }; 1418f316a42cSBen Gamari char *reset_done_event[] = { "ERROR=0", NULL }; 1419f69061beSDaniel Vetter int i, ret; 14208a905236SJesse Barnes 1421f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 14228a905236SJesse Barnes 14237db0ba24SDaniel Vetter /* 14247db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 14257db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 14267db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 14277db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 14287db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 14297db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 14307db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 14317db0ba24SDaniel Vetter * work we don't need to worry about any other races. 14327db0ba24SDaniel Vetter */ 14337db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 143444d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 14357db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 14367db0ba24SDaniel Vetter reset_event); 14371f83fee0SDaniel Vetter 1438f69061beSDaniel Vetter ret = i915_reset(dev); 1439f69061beSDaniel Vetter 1440f69061beSDaniel Vetter if (ret == 0) { 1441f69061beSDaniel Vetter /* 1442f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1443f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1444f69061beSDaniel Vetter * complete. 1445f69061beSDaniel Vetter * 1446f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1447f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1448f69061beSDaniel Vetter * updates before 1449f69061beSDaniel Vetter * the counter increment. 
1450f69061beSDaniel Vetter */ 1451f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1452f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1453f69061beSDaniel Vetter 1454f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1455f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 14561f83fee0SDaniel Vetter } else { 14571f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1458f316a42cSBen Gamari } 14591f83fee0SDaniel Vetter 1460f69061beSDaniel Vetter for_each_ring(ring, dev_priv, i) 1461f69061beSDaniel Vetter wake_up_all(&ring->irq_queue); 1462f69061beSDaniel Vetter 146396a02917SVille Syrjälä intel_display_handle_reset(dev); 146496a02917SVille Syrjälä 14651f83fee0SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 1466f316a42cSBen Gamari } 14678a905236SJesse Barnes } 14688a905236SJesse Barnes 146985f9e50dSDaniel Vetter /* NB: please notice the memset */ 147085f9e50dSDaniel Vetter static void i915_get_extra_instdone(struct drm_device *dev, 147185f9e50dSDaniel Vetter uint32_t *instdone) 147285f9e50dSDaniel Vetter { 147385f9e50dSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 147485f9e50dSDaniel Vetter memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 147585f9e50dSDaniel Vetter 147685f9e50dSDaniel Vetter switch(INTEL_INFO(dev)->gen) { 147785f9e50dSDaniel Vetter case 2: 147885f9e50dSDaniel Vetter case 3: 147985f9e50dSDaniel Vetter instdone[0] = I915_READ(INSTDONE); 148085f9e50dSDaniel Vetter break; 148185f9e50dSDaniel Vetter case 4: 148285f9e50dSDaniel Vetter case 5: 148385f9e50dSDaniel Vetter case 6: 148485f9e50dSDaniel Vetter instdone[0] = I915_READ(INSTDONE_I965); 148585f9e50dSDaniel Vetter instdone[1] = I915_READ(INSTDONE1); 148685f9e50dSDaniel Vetter break; 148785f9e50dSDaniel Vetter default: 148885f9e50dSDaniel Vetter WARN_ONCE(1, "Unsupported platform\n"); 148985f9e50dSDaniel Vetter case 7: 149085f9e50dSDaniel Vetter instdone[0] = I915_READ(GEN7_INSTDONE_1); 149185f9e50dSDaniel Vetter instdone[1] = I915_READ(GEN7_SC_INSTDONE); 149285f9e50dSDaniel Vetter instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 149385f9e50dSDaniel Vetter instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 149485f9e50dSDaniel Vetter break; 149585f9e50dSDaniel Vetter } 149685f9e50dSDaniel Vetter } 149785f9e50dSDaniel Vetter 14983bd3c932SChris Wilson #ifdef CONFIG_DEBUG_FS 14999df30794SChris Wilson static struct drm_i915_error_object * 1500d0d045e8SBen Widawsky i915_error_object_create_sized(struct drm_i915_private *dev_priv, 1501d0d045e8SBen Widawsky struct drm_i915_gem_object *src, 1502d0d045e8SBen Widawsky const int num_pages) 15039df30794SChris Wilson { 15049df30794SChris Wilson struct drm_i915_error_object *dst; 1505d0d045e8SBen Widawsky int i; 1506e56660ddSChris Wilson u32 reloc_offset; 15079df30794SChris Wilson 150805394f39SChris Wilson if (src == NULL || src->pages == NULL) 15099df30794SChris Wilson return NULL; 15109df30794SChris Wilson 1511d0d045e8SBen Widawsky dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); 15129df30794SChris Wilson if (dst == NULL) 15139df30794SChris Wilson return NULL; 15149df30794SChris Wilson 151505394f39SChris Wilson reloc_offset = src->gtt_offset; 1516d0d045e8SBen Widawsky for (i = 0; i < num_pages; i++) { 1517788885aeSAndrew Morton unsigned long flags; 1518e56660ddSChris Wilson void *d; 1519788885aeSAndrew Morton 1520e56660ddSChris Wilson d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 15219df30794SChris Wilson if (d == NULL) 15229df30794SChris Wilson goto unwind; 1523e56660ddSChris 
Wilson 1524788885aeSAndrew Morton local_irq_save(flags); 15255d4545aeSBen Widawsky if (reloc_offset < dev_priv->gtt.mappable_end && 152674898d7eSDaniel Vetter src->has_global_gtt_mapping) { 1527172975aaSChris Wilson void __iomem *s; 1528172975aaSChris Wilson 1529172975aaSChris Wilson /* Simply ignore tiling or any overlapping fence. 1530172975aaSChris Wilson * It's part of the error state, and this hopefully 1531172975aaSChris Wilson * captures what the GPU read. 1532172975aaSChris Wilson */ 1533172975aaSChris Wilson 15345d4545aeSBen Widawsky s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 15353e4d3af5SPeter Zijlstra reloc_offset); 1536e56660ddSChris Wilson memcpy_fromio(d, s, PAGE_SIZE); 15373e4d3af5SPeter Zijlstra io_mapping_unmap_atomic(s); 1538960e3564SChris Wilson } else if (src->stolen) { 1539960e3564SChris Wilson unsigned long offset; 1540960e3564SChris Wilson 1541960e3564SChris Wilson offset = dev_priv->mm.stolen_base; 1542960e3564SChris Wilson offset += src->stolen->start; 1543960e3564SChris Wilson offset += i << PAGE_SHIFT; 1544960e3564SChris Wilson 15451a240d4dSDaniel Vetter memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); 1546172975aaSChris Wilson } else { 15479da3da66SChris Wilson struct page *page; 1548172975aaSChris Wilson void *s; 1549172975aaSChris Wilson 15509da3da66SChris Wilson page = i915_gem_object_get_page(src, i); 1551172975aaSChris Wilson 15529da3da66SChris Wilson drm_clflush_pages(&page, 1); 15539da3da66SChris Wilson 15549da3da66SChris Wilson s = kmap_atomic(page); 1555172975aaSChris Wilson memcpy(d, s, PAGE_SIZE); 1556172975aaSChris Wilson kunmap_atomic(s); 1557172975aaSChris Wilson 15589da3da66SChris Wilson drm_clflush_pages(&page, 1); 1559172975aaSChris Wilson } 1560788885aeSAndrew Morton local_irq_restore(flags); 1561e56660ddSChris Wilson 15629da3da66SChris Wilson dst->pages[i] = d; 1563e56660ddSChris Wilson 1564e56660ddSChris Wilson reloc_offset += PAGE_SIZE; 15659df30794SChris Wilson } 1566d0d045e8SBen Widawsky dst->page_count = num_pages; 156705394f39SChris Wilson dst->gtt_offset = src->gtt_offset; 15689df30794SChris Wilson 15699df30794SChris Wilson return dst; 15709df30794SChris Wilson 15719df30794SChris Wilson unwind: 15729da3da66SChris Wilson while (i--) 15739da3da66SChris Wilson kfree(dst->pages[i]); 15749df30794SChris Wilson kfree(dst); 15759df30794SChris Wilson return NULL; 15769df30794SChris Wilson } 1577d0d045e8SBen Widawsky #define i915_error_object_create(dev_priv, src) \ 1578d0d045e8SBen Widawsky i915_error_object_create_sized((dev_priv), (src), \ 1579d0d045e8SBen Widawsky (src)->base.size>>PAGE_SHIFT) 15809df30794SChris Wilson 15819df30794SChris Wilson static void 15829df30794SChris Wilson i915_error_object_free(struct drm_i915_error_object *obj) 15839df30794SChris Wilson { 15849df30794SChris Wilson int page; 15859df30794SChris Wilson 15869df30794SChris Wilson if (obj == NULL) 15879df30794SChris Wilson return; 15889df30794SChris Wilson 15899df30794SChris Wilson for (page = 0; page < obj->page_count; page++) 15909df30794SChris Wilson kfree(obj->pages[page]); 15919df30794SChris Wilson 15929df30794SChris Wilson kfree(obj); 15939df30794SChris Wilson } 15949df30794SChris Wilson 1595742cbee8SDaniel Vetter void 1596742cbee8SDaniel Vetter i915_error_state_free(struct kref *error_ref) 15979df30794SChris Wilson { 1598742cbee8SDaniel Vetter struct drm_i915_error_state *error = container_of(error_ref, 1599742cbee8SDaniel Vetter typeof(*error), ref); 1600e2f973d5SChris Wilson int i; 1601e2f973d5SChris Wilson 160252d39a21SChris Wilson for (i = 0; i 
< ARRAY_SIZE(error->ring); i++) { 160352d39a21SChris Wilson i915_error_object_free(error->ring[i].batchbuffer); 160452d39a21SChris Wilson i915_error_object_free(error->ring[i].ringbuffer); 16057ed73da0SBen Widawsky i915_error_object_free(error->ring[i].ctx); 160652d39a21SChris Wilson kfree(error->ring[i].requests); 160752d39a21SChris Wilson } 1608e2f973d5SChris Wilson 16099df30794SChris Wilson kfree(error->active_bo); 16106ef3d427SChris Wilson kfree(error->overlay); 16117ed73da0SBen Widawsky kfree(error->display); 16129df30794SChris Wilson kfree(error); 16139df30794SChris Wilson } 16141b50247aSChris Wilson static void capture_bo(struct drm_i915_error_buffer *err, 16151b50247aSChris Wilson struct drm_i915_gem_object *obj) 1616c724e8a9SChris Wilson { 1617c724e8a9SChris Wilson err->size = obj->base.size; 1618c724e8a9SChris Wilson err->name = obj->base.name; 16190201f1ecSChris Wilson err->rseqno = obj->last_read_seqno; 16200201f1ecSChris Wilson err->wseqno = obj->last_write_seqno; 1621c724e8a9SChris Wilson err->gtt_offset = obj->gtt_offset; 1622c724e8a9SChris Wilson err->read_domains = obj->base.read_domains; 1623c724e8a9SChris Wilson err->write_domain = obj->base.write_domain; 1624c724e8a9SChris Wilson err->fence_reg = obj->fence_reg; 1625c724e8a9SChris Wilson err->pinned = 0; 1626c724e8a9SChris Wilson if (obj->pin_count > 0) 1627c724e8a9SChris Wilson err->pinned = 1; 1628c724e8a9SChris Wilson if (obj->user_pin_count > 0) 1629c724e8a9SChris Wilson err->pinned = -1; 1630c724e8a9SChris Wilson err->tiling = obj->tiling_mode; 1631c724e8a9SChris Wilson err->dirty = obj->dirty; 1632c724e8a9SChris Wilson err->purgeable = obj->madv != I915_MADV_WILLNEED; 163396154f2fSDaniel Vetter err->ring = obj->ring ? obj->ring->id : -1; 163493dfb40cSChris Wilson err->cache_level = obj->cache_level; 16351b50247aSChris Wilson } 1636c724e8a9SChris Wilson 16371b50247aSChris Wilson static u32 capture_active_bo(struct drm_i915_error_buffer *err, 16381b50247aSChris Wilson int count, struct list_head *head) 16391b50247aSChris Wilson { 16401b50247aSChris Wilson struct drm_i915_gem_object *obj; 16411b50247aSChris Wilson int i = 0; 16421b50247aSChris Wilson 16431b50247aSChris Wilson list_for_each_entry(obj, head, mm_list) { 16441b50247aSChris Wilson capture_bo(err++, obj); 1645c724e8a9SChris Wilson if (++i == count) 1646c724e8a9SChris Wilson break; 16471b50247aSChris Wilson } 1648c724e8a9SChris Wilson 16491b50247aSChris Wilson return i; 16501b50247aSChris Wilson } 16511b50247aSChris Wilson 16521b50247aSChris Wilson static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 16531b50247aSChris Wilson int count, struct list_head *head) 16541b50247aSChris Wilson { 16551b50247aSChris Wilson struct drm_i915_gem_object *obj; 16561b50247aSChris Wilson int i = 0; 16571b50247aSChris Wilson 165835c20a60SBen Widawsky list_for_each_entry(obj, head, global_list) { 16591b50247aSChris Wilson if (obj->pin_count == 0) 16601b50247aSChris Wilson continue; 16611b50247aSChris Wilson 16621b50247aSChris Wilson capture_bo(err++, obj); 16631b50247aSChris Wilson if (++i == count) 16641b50247aSChris Wilson break; 1665c724e8a9SChris Wilson } 1666c724e8a9SChris Wilson 1667c724e8a9SChris Wilson return i; 1668c724e8a9SChris Wilson } 1669c724e8a9SChris Wilson 1670748ebc60SChris Wilson static void i915_gem_record_fences(struct drm_device *dev, 1671748ebc60SChris Wilson struct drm_i915_error_state *error) 1672748ebc60SChris Wilson { 1673748ebc60SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 1674748ebc60SChris Wilson int i; 
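/* Note: the switch below relies on intentional fall-through -- gen 7 shares the gen 6 64-bit fence registers, and the gen 3 branch drops into the gen 2 loop so fences 0-7 are always captured; slots 8-15 are only read on 945G/945GM/G33. */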
1675748ebc60SChris Wilson 1676748ebc60SChris Wilson /* Fences */ 1677748ebc60SChris Wilson switch (INTEL_INFO(dev)->gen) { 1678775d17b6SDaniel Vetter case 7: 1679748ebc60SChris Wilson case 6: 168042b5aeabSVille Syrjälä for (i = 0; i < dev_priv->num_fence_regs; i++) 1681748ebc60SChris Wilson error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1682748ebc60SChris Wilson break; 1683748ebc60SChris Wilson case 5: 1684748ebc60SChris Wilson case 4: 1685748ebc60SChris Wilson for (i = 0; i < 16; i++) 1686748ebc60SChris Wilson error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1687748ebc60SChris Wilson break; 1688748ebc60SChris Wilson case 3: 1689748ebc60SChris Wilson if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1690748ebc60SChris Wilson for (i = 0; i < 8; i++) 1691748ebc60SChris Wilson error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1692748ebc60SChris Wilson case 2: 1693748ebc60SChris Wilson for (i = 0; i < 8; i++) 1694748ebc60SChris Wilson error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1695748ebc60SChris Wilson break; 1696748ebc60SChris Wilson 16977dbf9d6eSBen Widawsky default: 16987dbf9d6eSBen Widawsky BUG(); 1699748ebc60SChris Wilson } 1700748ebc60SChris Wilson } 1701748ebc60SChris Wilson 1702bcfb2e28SChris Wilson static struct drm_i915_error_object * 1703bcfb2e28SChris Wilson i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1704bcfb2e28SChris Wilson struct intel_ring_buffer *ring) 1705bcfb2e28SChris Wilson { 1706bcfb2e28SChris Wilson struct drm_i915_gem_object *obj; 1707bcfb2e28SChris Wilson u32 seqno; 1708bcfb2e28SChris Wilson 1709bcfb2e28SChris Wilson if (!ring->get_seqno) 1710bcfb2e28SChris Wilson return NULL; 1711bcfb2e28SChris Wilson 1712b45305fcSDaniel Vetter if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1713b45305fcSDaniel Vetter u32 acthd = I915_READ(ACTHD); 1714b45305fcSDaniel Vetter 1715b45305fcSDaniel Vetter if (WARN_ON(ring->id != RCS)) 1716b45305fcSDaniel Vetter return NULL; 1717b45305fcSDaniel Vetter 1718b45305fcSDaniel Vetter obj = ring->private; 1719b45305fcSDaniel Vetter if (acthd >= obj->gtt_offset && 1720b45305fcSDaniel Vetter acthd < obj->gtt_offset + obj->base.size) 1721b45305fcSDaniel Vetter return i915_error_object_create(dev_priv, obj); 1722b45305fcSDaniel Vetter } 1723b45305fcSDaniel Vetter 1724b2eadbc8SChris Wilson seqno = ring->get_seqno(ring, false); 1725bcfb2e28SChris Wilson list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1726bcfb2e28SChris Wilson if (obj->ring != ring) 1727bcfb2e28SChris Wilson continue; 1728bcfb2e28SChris Wilson 17290201f1ecSChris Wilson if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1730bcfb2e28SChris Wilson continue; 1731bcfb2e28SChris Wilson 1732bcfb2e28SChris Wilson if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1733bcfb2e28SChris Wilson continue; 1734bcfb2e28SChris Wilson 1735bcfb2e28SChris Wilson /* We need to copy these to an anonymous buffer as the simplest 1736bcfb2e28SChris Wilson * method to avoid being overwritten by userspace. 
1737bcfb2e28SChris Wilson */ 1738bcfb2e28SChris Wilson return i915_error_object_create(dev_priv, obj); 1739bcfb2e28SChris Wilson } 1740bcfb2e28SChris Wilson 1741bcfb2e28SChris Wilson return NULL; 1742bcfb2e28SChris Wilson } 1743bcfb2e28SChris Wilson 1744d27b1e0eSDaniel Vetter static void i915_record_ring_state(struct drm_device *dev, 1745d27b1e0eSDaniel Vetter struct drm_i915_error_state *error, 1746d27b1e0eSDaniel Vetter struct intel_ring_buffer *ring) 1747d27b1e0eSDaniel Vetter { 1748d27b1e0eSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 1749d27b1e0eSDaniel Vetter 175033f3f518SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 175112f55818SChris Wilson error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 175233f3f518SDaniel Vetter error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 17537e3b8737SDaniel Vetter error->semaphore_mboxes[ring->id][0] 17547e3b8737SDaniel Vetter = I915_READ(RING_SYNC_0(ring->mmio_base)); 17557e3b8737SDaniel Vetter error->semaphore_mboxes[ring->id][1] 17567e3b8737SDaniel Vetter = I915_READ(RING_SYNC_1(ring->mmio_base)); 1757df2b23d9SChris Wilson error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1758df2b23d9SChris Wilson error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 175933f3f518SDaniel Vetter } 1760c1cd90edSDaniel Vetter 1761d27b1e0eSDaniel Vetter if (INTEL_INFO(dev)->gen >= 4) { 17629d2f41faSDaniel Vetter error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1763d27b1e0eSDaniel Vetter error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1764d27b1e0eSDaniel Vetter error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1765d27b1e0eSDaniel Vetter error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1766c1cd90edSDaniel Vetter error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1767050ee91fSBen Widawsky if (ring->id == RCS) 1768d27b1e0eSDaniel Vetter error->bbaddr = I915_READ64(BB_ADDR); 1769d27b1e0eSDaniel Vetter } else { 17709d2f41faSDaniel Vetter error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1771d27b1e0eSDaniel Vetter error->ipeir[ring->id] = I915_READ(IPEIR); 1772d27b1e0eSDaniel Vetter error->ipehr[ring->id] = I915_READ(IPEHR); 1773d27b1e0eSDaniel Vetter error->instdone[ring->id] = I915_READ(INSTDONE); 1774d27b1e0eSDaniel Vetter } 1775d27b1e0eSDaniel Vetter 17769574b3feSBen Widawsky error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1777c1cd90edSDaniel Vetter error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1778b2eadbc8SChris Wilson error->seqno[ring->id] = ring->get_seqno(ring, false); 1779d27b1e0eSDaniel Vetter error->acthd[ring->id] = intel_ring_get_active_head(ring); 1780c1cd90edSDaniel Vetter error->head[ring->id] = I915_READ_HEAD(ring); 1781c1cd90edSDaniel Vetter error->tail[ring->id] = I915_READ_TAIL(ring); 17820f3b6849SChris Wilson error->ctl[ring->id] = I915_READ_CTL(ring); 17837e3b8737SDaniel Vetter 17847e3b8737SDaniel Vetter error->cpu_ring_head[ring->id] = ring->head; 17857e3b8737SDaniel Vetter error->cpu_ring_tail[ring->id] = ring->tail; 1786d27b1e0eSDaniel Vetter } 1787d27b1e0eSDaniel Vetter 17888c123e54SBen Widawsky 17898c123e54SBen Widawsky static void i915_gem_record_active_context(struct intel_ring_buffer *ring, 17908c123e54SBen Widawsky struct drm_i915_error_state *error, 17918c123e54SBen Widawsky struct drm_i915_error_ring *ering) 17928c123e54SBen Widawsky { 17938c123e54SBen Widawsky struct drm_i915_private *dev_priv = ring->dev->dev_private; 17948c123e54SBen Widawsky struct 
drm_i915_gem_object *obj; 17958c123e54SBen Widawsky 17968c123e54SBen Widawsky /* Currently render ring is the only HW context user */ 17978c123e54SBen Widawsky if (ring->id != RCS || !error->ccid) 17988c123e54SBen Widawsky return; 17998c123e54SBen Widawsky 180035c20a60SBen Widawsky list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 18018c123e54SBen Widawsky if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { 18028c123e54SBen Widawsky ering->ctx = i915_error_object_create_sized(dev_priv, 18038c123e54SBen Widawsky obj, 1); 18048c123e54SBen Widawsky } 18058c123e54SBen Widawsky } 18068c123e54SBen Widawsky } 18078c123e54SBen Widawsky 180852d39a21SChris Wilson static void i915_gem_record_rings(struct drm_device *dev, 180952d39a21SChris Wilson struct drm_i915_error_state *error) 181052d39a21SChris Wilson { 181152d39a21SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 1812b4519513SChris Wilson struct intel_ring_buffer *ring; 181352d39a21SChris Wilson struct drm_i915_gem_request *request; 181452d39a21SChris Wilson int i, count; 181552d39a21SChris Wilson 1816b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 181752d39a21SChris Wilson i915_record_ring_state(dev, error, ring); 181852d39a21SChris Wilson 181952d39a21SChris Wilson error->ring[i].batchbuffer = 182052d39a21SChris Wilson i915_error_first_batchbuffer(dev_priv, ring); 182152d39a21SChris Wilson 182252d39a21SChris Wilson error->ring[i].ringbuffer = 182352d39a21SChris Wilson i915_error_object_create(dev_priv, ring->obj); 182452d39a21SChris Wilson 18258c123e54SBen Widawsky 18268c123e54SBen Widawsky i915_gem_record_active_context(ring, error, &error->ring[i]); 18278c123e54SBen Widawsky 182852d39a21SChris Wilson count = 0; 182952d39a21SChris Wilson list_for_each_entry(request, &ring->request_list, list) 183052d39a21SChris Wilson count++; 183152d39a21SChris Wilson 183252d39a21SChris Wilson error->ring[i].num_requests = count; 183352d39a21SChris Wilson error->ring[i].requests = 183452d39a21SChris Wilson kmalloc(count*sizeof(struct drm_i915_error_request), 183552d39a21SChris Wilson GFP_ATOMIC); 183652d39a21SChris Wilson if (error->ring[i].requests == NULL) { 183752d39a21SChris Wilson error->ring[i].num_requests = 0; 183852d39a21SChris Wilson continue; 183952d39a21SChris Wilson } 184052d39a21SChris Wilson 184152d39a21SChris Wilson count = 0; 184252d39a21SChris Wilson list_for_each_entry(request, &ring->request_list, list) { 184352d39a21SChris Wilson struct drm_i915_error_request *erq; 184452d39a21SChris Wilson 184552d39a21SChris Wilson erq = &error->ring[i].requests[count++]; 184652d39a21SChris Wilson erq->seqno = request->seqno; 184752d39a21SChris Wilson erq->jiffies = request->emitted_jiffies; 1848ee4f42b1SChris Wilson erq->tail = request->tail; 184952d39a21SChris Wilson } 185052d39a21SChris Wilson } 185152d39a21SChris Wilson } 185252d39a21SChris Wilson 18538a905236SJesse Barnes /** 18548a905236SJesse Barnes * i915_capture_error_state - capture an error record for later analysis 18558a905236SJesse Barnes * @dev: drm device 18568a905236SJesse Barnes * 18578a905236SJesse Barnes * Should be called when an error is detected (either a hang or an error 18588a905236SJesse Barnes * interrupt) to capture error state from the time of the error. Fills 18598a905236SJesse Barnes * out a structure which becomes available in debugfs for user level tools 18608a905236SJesse Barnes * to pick up. 
18618a905236SJesse Barnes */ 186263eeaf38SJesse Barnes static void i915_capture_error_state(struct drm_device *dev) 186363eeaf38SJesse Barnes { 186463eeaf38SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 186505394f39SChris Wilson struct drm_i915_gem_object *obj; 186663eeaf38SJesse Barnes struct drm_i915_error_state *error; 186763eeaf38SJesse Barnes unsigned long flags; 18689db4a9c7SJesse Barnes int i, pipe; 186963eeaf38SJesse Barnes 187099584db3SDaniel Vetter spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 187199584db3SDaniel Vetter error = dev_priv->gpu_error.first_error; 187299584db3SDaniel Vetter spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 18739df30794SChris Wilson if (error) 18749df30794SChris Wilson return; 187563eeaf38SJesse Barnes 18769db4a9c7SJesse Barnes /* Account for pipe specific data like PIPE*STAT */ 187733f3f518SDaniel Vetter error = kzalloc(sizeof(*error), GFP_ATOMIC); 187863eeaf38SJesse Barnes if (!error) { 18799df30794SChris Wilson DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 18809df30794SChris Wilson return; 188163eeaf38SJesse Barnes } 188263eeaf38SJesse Barnes 18832f86f191SBen Widawsky DRM_INFO("capturing error event; look for more information in " 18842f86f191SBen Widawsky "/sys/kernel/debug/dri/%d/i915_error_state\n", 1885b6f7833bSChris Wilson dev->primary->index); 18862fa772f3SChris Wilson 1887742cbee8SDaniel Vetter kref_init(&error->ref); 188863eeaf38SJesse Barnes error->eir = I915_READ(EIR); 188963eeaf38SJesse Barnes error->pgtbl_er = I915_READ(PGTBL_ER); 1890211816ecSBen Widawsky if (HAS_HW_CONTEXTS(dev)) 1891b9a3906bSBen Widawsky error->ccid = I915_READ(CCID); 1892be998e2eSBen Widawsky 1893be998e2eSBen Widawsky if (HAS_PCH_SPLIT(dev)) 1894be998e2eSBen Widawsky error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1895be998e2eSBen Widawsky else if (IS_VALLEYVIEW(dev)) 1896be998e2eSBen Widawsky error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1897be998e2eSBen Widawsky else if (IS_GEN2(dev)) 1898be998e2eSBen Widawsky error->ier = I915_READ16(IER); 1899be998e2eSBen Widawsky else 1900be998e2eSBen Widawsky error->ier = I915_READ(IER); 1901be998e2eSBen Widawsky 19020f3b6849SChris Wilson if (INTEL_INFO(dev)->gen >= 6) 19030f3b6849SChris Wilson error->derrmr = I915_READ(DERRMR); 19040f3b6849SChris Wilson 19050f3b6849SChris Wilson if (IS_VALLEYVIEW(dev)) 19060f3b6849SChris Wilson error->forcewake = I915_READ(FORCEWAKE_VLV); 19070f3b6849SChris Wilson else if (INTEL_INFO(dev)->gen >= 7) 19080f3b6849SChris Wilson error->forcewake = I915_READ(FORCEWAKE_MT); 19090f3b6849SChris Wilson else if (INTEL_INFO(dev)->gen == 6) 19100f3b6849SChris Wilson error->forcewake = I915_READ(FORCEWAKE); 19110f3b6849SChris Wilson 19124f3308b9SPaulo Zanoni if (!HAS_PCH_SPLIT(dev)) 19139db4a9c7SJesse Barnes for_each_pipe(pipe) 19149db4a9c7SJesse Barnes error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1915d27b1e0eSDaniel Vetter 191633f3f518SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 1917f406839fSChris Wilson error->error = I915_READ(ERROR_GEN6); 191833f3f518SDaniel Vetter error->done_reg = I915_READ(DONE_REG); 191933f3f518SDaniel Vetter } 1920add354ddSChris Wilson 192171e172e8SBen Widawsky if (INTEL_INFO(dev)->gen == 7) 192271e172e8SBen Widawsky error->err_int = I915_READ(GEN7_ERR_INT); 192371e172e8SBen Widawsky 1924050ee91fSBen Widawsky i915_get_extra_instdone(dev, error->extra_instdone); 1925050ee91fSBen Widawsky 1926748ebc60SChris Wilson i915_gem_record_fences(dev, error); 192752d39a21SChris Wilson i915_gem_record_rings(dev, error); 
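/* The counting pass below sizes a single GFP_ATOMIC allocation for both buffer lists: pinned_bo, when present, simply points just past the active_bo entries. */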
19289df30794SChris Wilson 1929c724e8a9SChris Wilson /* Record buffers on the active and pinned lists. */ 19309df30794SChris Wilson error->active_bo = NULL; 1931c724e8a9SChris Wilson error->pinned_bo = NULL; 19329df30794SChris Wilson 1933bcfb2e28SChris Wilson i = 0; 1934bcfb2e28SChris Wilson list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1935bcfb2e28SChris Wilson i++; 1936bcfb2e28SChris Wilson error->active_bo_count = i; 193735c20a60SBen Widawsky list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 19381b50247aSChris Wilson if (obj->pin_count) 1939bcfb2e28SChris Wilson i++; 1940bcfb2e28SChris Wilson error->pinned_bo_count = i - error->active_bo_count; 1941c724e8a9SChris Wilson 19428e934dbfSChris Wilson error->active_bo = NULL; 19438e934dbfSChris Wilson error->pinned_bo = NULL; 1944bcfb2e28SChris Wilson if (i) { 1945bcfb2e28SChris Wilson error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 19469df30794SChris Wilson GFP_ATOMIC); 1947c724e8a9SChris Wilson if (error->active_bo) 1948c724e8a9SChris Wilson error->pinned_bo = 1949c724e8a9SChris Wilson error->active_bo + error->active_bo_count; 19509df30794SChris Wilson } 1951c724e8a9SChris Wilson 1952c724e8a9SChris Wilson if (error->active_bo) 1953c724e8a9SChris Wilson error->active_bo_count = 19541b50247aSChris Wilson capture_active_bo(error->active_bo, 1955c724e8a9SChris Wilson error->active_bo_count, 1956c724e8a9SChris Wilson &dev_priv->mm.active_list); 1957c724e8a9SChris Wilson 1958c724e8a9SChris Wilson if (error->pinned_bo) 1959c724e8a9SChris Wilson error->pinned_bo_count = 19601b50247aSChris Wilson capture_pinned_bo(error->pinned_bo, 1961c724e8a9SChris Wilson error->pinned_bo_count, 19626c085a72SChris Wilson &dev_priv->mm.bound_list); 196363eeaf38SJesse Barnes 19648a905236SJesse Barnes do_gettimeofday(&error->time); 19658a905236SJesse Barnes 19666ef3d427SChris Wilson error->overlay = intel_overlay_capture_error_state(dev); 1967c4a1d9e4SChris Wilson error->display = intel_display_capture_error_state(dev); 19686ef3d427SChris Wilson 196999584db3SDaniel Vetter spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 197099584db3SDaniel Vetter if (dev_priv->gpu_error.first_error == NULL) { 197199584db3SDaniel Vetter dev_priv->gpu_error.first_error = error; 19729df30794SChris Wilson error = NULL; 19739df30794SChris Wilson } 197499584db3SDaniel Vetter spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 19759df30794SChris Wilson 19769df30794SChris Wilson if (error) 1977742cbee8SDaniel Vetter i915_error_state_free(&error->ref); 19789df30794SChris Wilson } 19799df30794SChris Wilson 19809df30794SChris Wilson void i915_destroy_error_state(struct drm_device *dev) 19819df30794SChris Wilson { 19829df30794SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 19839df30794SChris Wilson struct drm_i915_error_state *error; 19846dc0e816SBen Widawsky unsigned long flags; 19859df30794SChris Wilson 198699584db3SDaniel Vetter spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 198799584db3SDaniel Vetter error = dev_priv->gpu_error.first_error; 198899584db3SDaniel Vetter dev_priv->gpu_error.first_error = NULL; 198999584db3SDaniel Vetter spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 19909df30794SChris Wilson 19919df30794SChris Wilson if (error) 1992742cbee8SDaniel Vetter kref_put(&error->ref, i915_error_state_free); 199363eeaf38SJesse Barnes } 19943bd3c932SChris Wilson #else 19953bd3c932SChris Wilson #define i915_capture_error_state(x) 19963bd3c932SChris Wilson #endif 199763eeaf38SJesse Barnes 199835aed2e6SChris 
Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1999c0e09200SDave Airlie { 20008a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 2001bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 200263eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 2003050ee91fSBen Widawsky int pipe, i; 200463eeaf38SJesse Barnes 200535aed2e6SChris Wilson if (!eir) 200635aed2e6SChris Wilson return; 200763eeaf38SJesse Barnes 2008a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 20098a905236SJesse Barnes 2010bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 2011bd9854f9SBen Widawsky 20128a905236SJesse Barnes if (IS_G4X(dev)) { 20138a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 20148a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 20158a905236SJesse Barnes 2016a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2017a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2018050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 2019050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2020a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2021a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 20228a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 20233143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 20248a905236SJesse Barnes } 20258a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 20268a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 2027a70491ccSJoe Perches pr_err("page table error\n"); 2028a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 20298a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 20303143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 20318a905236SJesse Barnes } 20328a905236SJesse Barnes } 20338a905236SJesse Barnes 2034a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 203563eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 203663eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 2037a70491ccSJoe Perches pr_err("page table error\n"); 2038a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 203963eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 20403143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 204163eeaf38SJesse Barnes } 20428a905236SJesse Barnes } 20438a905236SJesse Barnes 204463eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 2045a70491ccSJoe Perches pr_err("memory refresh error:\n"); 20469db4a9c7SJesse Barnes for_each_pipe(pipe) 2047a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 20489db4a9c7SJesse Barnes pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 204963eeaf38SJesse Barnes /* pipestat has already been acked */ 205063eeaf38SJesse Barnes } 205163eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 2052a70491ccSJoe Perches pr_err("instruction error\n"); 2053a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2054050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 2055050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2056a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 205763eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 205863eeaf38SJesse Barnes 2059a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2060a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2061a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 206263eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 20633143a2bfSChris Wilson POSTING_READ(IPEIR); 
206463eeaf38SJesse Barnes } else { 206563eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 206663eeaf38SJesse Barnes 2067a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2068a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2069a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2070a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 207163eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 20723143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 207363eeaf38SJesse Barnes } 207463eeaf38SJesse Barnes } 207563eeaf38SJesse Barnes 207663eeaf38SJesse Barnes I915_WRITE(EIR, eir); 20773143a2bfSChris Wilson POSTING_READ(EIR); 207863eeaf38SJesse Barnes eir = I915_READ(EIR); 207963eeaf38SJesse Barnes if (eir) { 208063eeaf38SJesse Barnes /* 208163eeaf38SJesse Barnes * some errors might have become stuck, 208263eeaf38SJesse Barnes * mask them. 208363eeaf38SJesse Barnes */ 208463eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 208563eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 208663eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 208763eeaf38SJesse Barnes } 208835aed2e6SChris Wilson } 208935aed2e6SChris Wilson 209035aed2e6SChris Wilson /** 209135aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 209235aed2e6SChris Wilson * @dev: drm device 209335aed2e6SChris Wilson * 209435aed2e6SChris Wilson * Do some basic checking of register state at error interrupt time and 209535aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 209635aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 209735aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 209835aed2e6SChris Wilson * of a ring dump etc.). 209935aed2e6SChris Wilson */ 2100527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 210135aed2e6SChris Wilson { 210235aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 2103b4519513SChris Wilson struct intel_ring_buffer *ring; 2104b4519513SChris Wilson int i; 210535aed2e6SChris Wilson 210635aed2e6SChris Wilson i915_capture_error_state(dev); 210735aed2e6SChris Wilson i915_report_and_clear_eir(dev); 21088a905236SJesse Barnes 2109ba1234d1SBen Gamari if (wedged) { 2110f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2111f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 2112ba1234d1SBen Gamari 211311ed50ecSBen Gamari /* 21141f83fee0SDaniel Vetter * Wake up waiting processes so that the reset work item 21151f83fee0SDaniel Vetter * doesn't deadlock trying to grab various locks. 
211611ed50ecSBen Gamari */ 2117b4519513SChris Wilson for_each_ring(ring, dev_priv, i) 2118b4519513SChris Wilson wake_up_all(&ring->irq_queue); 211911ed50ecSBen Gamari } 212011ed50ecSBen Gamari 212199584db3SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 21228a905236SJesse Barnes } 21238a905236SJesse Barnes 212421ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 21254e5359cdSSimon Farnsworth { 21264e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 21274e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 21284e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 212905394f39SChris Wilson struct drm_i915_gem_object *obj; 21304e5359cdSSimon Farnsworth struct intel_unpin_work *work; 21314e5359cdSSimon Farnsworth unsigned long flags; 21324e5359cdSSimon Farnsworth bool stall_detected; 21334e5359cdSSimon Farnsworth 21344e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 21354e5359cdSSimon Farnsworth if (intel_crtc == NULL) 21364e5359cdSSimon Farnsworth return; 21374e5359cdSSimon Farnsworth 21384e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 21394e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 21404e5359cdSSimon Farnsworth 2141e7d841caSChris Wilson if (work == NULL || 2142e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2143e7d841caSChris Wilson !work->enable_stall_check) { 21444e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 21454e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 21464e5359cdSSimon Farnsworth return; 21474e5359cdSSimon Farnsworth } 21484e5359cdSSimon Farnsworth 21494e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 215005394f39SChris Wilson obj = work->pending_flip_obj; 2151a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 21529db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 2153446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2154446f2545SArmin Reese obj->gtt_offset; 21554e5359cdSSimon Farnsworth } else { 21569db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 215705394f39SChris Wilson stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 215801f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 21594e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 21604e5359cdSSimon Farnsworth } 21614e5359cdSSimon Farnsworth 21624e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 21634e5359cdSSimon Farnsworth 21644e5359cdSSimon Farnsworth if (stall_detected) { 21654e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 21664e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 21674e5359cdSSimon Farnsworth } 21684e5359cdSSimon Farnsworth } 21694e5359cdSSimon Farnsworth 217042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 217142f52ef8SKeith Packard * we use as a pipe index 217242f52ef8SKeith Packard */ 2173f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 21740a3e67a4SJesse Barnes { 21750a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2176e9d21d7fSKeith Packard unsigned long irqflags; 217771e0ffa5SJesse Barnes 21785eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 217971e0ffa5SJesse Barnes 
return -EINVAL; 21800a3e67a4SJesse Barnes 21811ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2182f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 21837c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 21847c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 21850a3e67a4SJesse Barnes else 21867c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 21877c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 21888692d00eSChris Wilson 21898692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 21908692d00eSChris Wilson if (dev_priv->info->gen == 3) 21916b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 21921ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 21938692d00eSChris Wilson 21940a3e67a4SJesse Barnes return 0; 21950a3e67a4SJesse Barnes } 21960a3e67a4SJesse Barnes 2197f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2198f796cf8fSJesse Barnes { 2199f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2200f796cf8fSJesse Barnes unsigned long irqflags; 2201f796cf8fSJesse Barnes 2202f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 2203f796cf8fSJesse Barnes return -EINVAL; 2204f796cf8fSJesse Barnes 2205f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2206f796cf8fSJesse Barnes ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 2207f796cf8fSJesse Barnes DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 2208f796cf8fSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2209f796cf8fSJesse Barnes 2210f796cf8fSJesse Barnes return 0; 2211f796cf8fSJesse Barnes } 2212f796cf8fSJesse Barnes 2213f71d4af4SJesse Barnes static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 2214b1f14ad0SJesse Barnes { 2215b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2216b1f14ad0SJesse Barnes unsigned long irqflags; 2217b1f14ad0SJesse Barnes 2218b1f14ad0SJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 2219b1f14ad0SJesse Barnes return -EINVAL; 2220b1f14ad0SJesse Barnes 2221b1f14ad0SJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2222b615b57aSChris Wilson ironlake_enable_display_irq(dev_priv, 2223b615b57aSChris Wilson DE_PIPEA_VBLANK_IVB << (5 * pipe)); 2224b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2225b1f14ad0SJesse Barnes 2226b1f14ad0SJesse Barnes return 0; 2227b1f14ad0SJesse Barnes } 2228b1f14ad0SJesse Barnes 22297e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 22307e231dbeSJesse Barnes { 22317e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22327e231dbeSJesse Barnes unsigned long irqflags; 223331acc7f5SJesse Barnes u32 imr; 22347e231dbeSJesse Barnes 22357e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 22367e231dbeSJesse Barnes return -EINVAL; 22377e231dbeSJesse Barnes 22387e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22397e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 224031acc7f5SJesse Barnes if (pipe == 0) 22417e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 224231acc7f5SJesse Barnes else 22437e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22447e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 224531acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 224631acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 
22477e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22487e231dbeSJesse Barnes 22497e231dbeSJesse Barnes return 0; 22507e231dbeSJesse Barnes } 22517e231dbeSJesse Barnes 225242f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 225342f52ef8SKeith Packard * we use as a pipe index 225442f52ef8SKeith Packard */ 2255f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 22560a3e67a4SJesse Barnes { 22570a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2258e9d21d7fSKeith Packard unsigned long irqflags; 22590a3e67a4SJesse Barnes 22601ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22618692d00eSChris Wilson if (dev_priv->info->gen == 3) 22626b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 22638692d00eSChris Wilson 22647c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 22657c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 22667c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 22671ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22680a3e67a4SJesse Barnes } 22690a3e67a4SJesse Barnes 2270f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2271f796cf8fSJesse Barnes { 2272f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2273f796cf8fSJesse Barnes unsigned long irqflags; 2274f796cf8fSJesse Barnes 2275f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2276f796cf8fSJesse Barnes ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 2277f796cf8fSJesse Barnes DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 2278f796cf8fSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2279f796cf8fSJesse Barnes } 2280f796cf8fSJesse Barnes 2281f71d4af4SJesse Barnes static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 2282b1f14ad0SJesse Barnes { 2283b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2284b1f14ad0SJesse Barnes unsigned long irqflags; 2285b1f14ad0SJesse Barnes 2286b1f14ad0SJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2287b615b57aSChris Wilson ironlake_disable_display_irq(dev_priv, 2288b615b57aSChris Wilson DE_PIPEA_VBLANK_IVB << (pipe * 5)); 2289b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2290b1f14ad0SJesse Barnes } 2291b1f14ad0SJesse Barnes 22927e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 22937e231dbeSJesse Barnes { 22947e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22957e231dbeSJesse Barnes unsigned long irqflags; 229631acc7f5SJesse Barnes u32 imr; 22977e231dbeSJesse Barnes 22987e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 229931acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 230031acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 23017e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 230231acc7f5SJesse Barnes if (pipe == 0) 23037e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 230431acc7f5SJesse Barnes else 23057e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 23067e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 23077e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 23087e231dbeSJesse Barnes } 23097e231dbeSJesse Barnes 2310893eead0SChris Wilson static u32 2311893eead0SChris 
Wilson ring_last_seqno(struct intel_ring_buffer *ring) 2312852835f3SZou Nan hai { 2313893eead0SChris Wilson return list_entry(ring->request_list.prev, 2314893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 2315893eead0SChris Wilson } 2316893eead0SChris Wilson 23179107e9d2SChris Wilson static bool 23189107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2319893eead0SChris Wilson { 23209107e9d2SChris Wilson return (list_empty(&ring->request_list) || 23219107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 2322f65d9421SBen Gamari } 2323f65d9421SBen Gamari 2324*6274f212SChris Wilson static struct intel_ring_buffer * 2325*6274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2326a24a11e6SChris Wilson { 2327a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 2328*6274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 2329a24a11e6SChris Wilson 2330a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2331a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 2332a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2333*6274f212SChris Wilson return NULL; 2334a24a11e6SChris Wilson 2335a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 2336a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 2337a24a11e6SChris Wilson */ 2338*6274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2339a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 2340a24a11e6SChris Wilson do { 2341a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 2342a24a11e6SChris Wilson if (cmd == ipehr) 2343a24a11e6SChris Wilson break; 2344a24a11e6SChris Wilson 2345a24a11e6SChris Wilson acthd -= 4; 2346a24a11e6SChris Wilson if (acthd < acthd_min) 2347*6274f212SChris Wilson return NULL; 2348a24a11e6SChris Wilson } while (1); 2349a24a11e6SChris Wilson 2350*6274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2351*6274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2352a24a11e6SChris Wilson } 2353a24a11e6SChris Wilson 2354*6274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 2355*6274f212SChris Wilson { 2356*6274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 2357*6274f212SChris Wilson struct intel_ring_buffer *signaller; 2358*6274f212SChris Wilson u32 seqno, ctl; 2359*6274f212SChris Wilson 2360*6274f212SChris Wilson ring->hangcheck.deadlock = true; 2361*6274f212SChris Wilson 2362*6274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 2363*6274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 2364*6274f212SChris Wilson return -1; 2365*6274f212SChris Wilson 2366*6274f212SChris Wilson /* cursory check for an unkickable deadlock */ 2367*6274f212SChris Wilson ctl = I915_READ_CTL(signaller); 2368*6274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2369*6274f212SChris Wilson return -1; 2370*6274f212SChris Wilson 2371*6274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2372*6274f212SChris Wilson } 2373*6274f212SChris Wilson 2374*6274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2375*6274f212SChris Wilson { 2376*6274f212SChris Wilson struct intel_ring_buffer *ring; 2377*6274f212SChris Wilson int i; 2378*6274f212SChris 
Wilson 2379*6274f212SChris Wilson for_each_ring(ring, dev_priv, i) 2380*6274f212SChris Wilson ring->hangcheck.deadlock = false; 2381*6274f212SChris Wilson } 2382*6274f212SChris Wilson 2383*6274f212SChris Wilson static enum { wait, active, kick, hung } ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 23841ec14ad3SChris Wilson { 23851ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 23861ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 23879107e9d2SChris Wilson u32 tmp; 23889107e9d2SChris Wilson 2389*6274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 2390*6274f212SChris Wilson return active; 2391*6274f212SChris Wilson 23929107e9d2SChris Wilson if (IS_GEN2(dev)) 2393*6274f212SChris Wilson return hung; 23949107e9d2SChris Wilson 23959107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 23969107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 23979107e9d2SChris Wilson * and break the hang. This should work on 23989107e9d2SChris Wilson * all but the second generation chipsets. 23999107e9d2SChris Wilson */ 24009107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 24011ec14ad3SChris Wilson if (tmp & RING_WAIT) { 24021ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 24031ec14ad3SChris Wilson ring->name); 24041ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 2405*6274f212SChris Wilson return kick; 24061ec14ad3SChris Wilson } 2407a24a11e6SChris Wilson 2408*6274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2409*6274f212SChris Wilson switch (semaphore_passed(ring)) { 2410*6274f212SChris Wilson default: 2411*6274f212SChris Wilson return hung; 2412*6274f212SChris Wilson case 1: 2413a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 2414a24a11e6SChris Wilson ring->name); 2415a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 2416*6274f212SChris Wilson return kick; 2417*6274f212SChris Wilson case 0: 2418*6274f212SChris Wilson return wait; 2419*6274f212SChris Wilson } 24209107e9d2SChris Wilson } 24219107e9d2SChris Wilson 2422*6274f212SChris Wilson return hung; 2423a24a11e6SChris Wilson } 2424d1e61e7fSChris Wilson 2425f65d9421SBen Gamari /** 2426f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 242705407ff8SMika Kuoppala * batchbuffers in a long time. We keep track of seqno progress per ring, and 242805407ff8SMika Kuoppala * if there is no progress, the hangcheck score for that ring is increased. 242905407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. If it is stuck, 243005407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 243105407ff8SMika Kuoppala * we assume the chip is wedged and try to fix it by resetting the chip.
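 * (With the scores #defined below: the first elapse after a hang essentially
 * just records the ring's current seqno, and a busy ring that then makes no
 * progress typically crosses the reset threshold on the third consecutive
 * check, i.e. after roughly two hangcheck periods.)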
2432f65d9421SBen Gamari */ 2433f65d9421SBen Gamari void i915_hangcheck_elapsed(unsigned long data) 2434f65d9421SBen Gamari { 2435f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 2436f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 2437b4519513SChris Wilson struct intel_ring_buffer *ring; 2438b4519513SChris Wilson int i; 243905407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 24409107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 24419107e9d2SChris Wilson #define BUSY 1 24429107e9d2SChris Wilson #define KICK 5 24439107e9d2SChris Wilson #define HUNG 20 24449107e9d2SChris Wilson #define FIRE 30 2445893eead0SChris Wilson 24463e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 24473e0dc6b0SBen Widawsky return; 24483e0dc6b0SBen Widawsky 2449b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 245005407ff8SMika Kuoppala u32 seqno, acthd; 24519107e9d2SChris Wilson bool busy = true; 2452b4519513SChris Wilson 2453*6274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 2454*6274f212SChris Wilson 245505407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 245605407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 245705407ff8SMika Kuoppala 245805407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 24599107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 24609107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 24619107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. */ 24629107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 24639107e9d2SChris Wilson ring->name); 24649107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 24659107e9d2SChris Wilson ring->hangcheck.score += HUNG; 24669107e9d2SChris Wilson } else 24679107e9d2SChris Wilson busy = false; 246805407ff8SMika Kuoppala } else { 24699107e9d2SChris Wilson int score; 24709107e9d2SChris Wilson 2471*6274f212SChris Wilson /* We always increment the hangcheck score 2472*6274f212SChris Wilson * if the ring is busy and still processing 2473*6274f212SChris Wilson * the same request, so that no single request 2474*6274f212SChris Wilson * can run indefinitely (such as a chain of 2475*6274f212SChris Wilson * batches). The only time we do not increment 2476*6274f212SChris Wilson * the hangcheck score on this ring is when this 2477*6274f212SChris Wilson * ring is in a legitimate wait for another 2478*6274f212SChris Wilson * ring. In that case the waiting ring is a 2479*6274f212SChris Wilson * victim and we want to be sure we catch the 2480*6274f212SChris Wilson * right culprit. Then every time we do kick 2481*6274f212SChris Wilson * the ring, we add a small increment to the 2482*6274f212SChris Wilson * score so that we can catch a batch that is 2483*6274f212SChris Wilson * being repeatedly kicked and so responsible 2484*6274f212SChris Wilson * for stalling the machine.
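 * As a concrete example with the values above: a batch that needs a
 * kick on every check gains KICK (5) per period and still trips the
 * FIRE (30) threshold after seven such checks, while an outright
 * hang gains HUNG (20) per period and fires after two.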
24859107e9d2SChris Wilson */ 2486*6274f212SChris Wilson switch (ring_stuck(ring, acthd)) { 2487*6274f212SChris Wilson case wait: 2488*6274f212SChris Wilson score = 0; 2489*6274f212SChris Wilson break; 2490*6274f212SChris Wilson case active: 24919107e9d2SChris Wilson score = BUSY; 2492*6274f212SChris Wilson break; 2493*6274f212SChris Wilson case kick: 2494*6274f212SChris Wilson score = KICK; 2495*6274f212SChris Wilson break; 2496*6274f212SChris Wilson case hung: 2497*6274f212SChris Wilson score = HUNG; 2498*6274f212SChris Wilson stuck[i] = true; 2499*6274f212SChris Wilson break; 2500*6274f212SChris Wilson } 25019107e9d2SChris Wilson ring->hangcheck.score += score; 250205407ff8SMika Kuoppala } 25039107e9d2SChris Wilson } else { 25049107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 25059107e9d2SChris Wilson * attempts across multiple batches. 25069107e9d2SChris Wilson */ 25079107e9d2SChris Wilson if (ring->hangcheck.score > 0) 25089107e9d2SChris Wilson ring->hangcheck.score--; 2509cbb465e7SChris Wilson } 2510f65d9421SBen Gamari 251105407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 251205407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 25139107e9d2SChris Wilson busy_count += busy; 251405407ff8SMika Kuoppala } 251505407ff8SMika Kuoppala 251605407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 25179107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 251805407ff8SMika Kuoppala rings_hung++; 251905407ff8SMika Kuoppala DRM_ERROR("%s: %s on %s 0x%x\n", ring->name, 252005407ff8SMika Kuoppala stuck[i] ? "stuck" : "no progress", 252105407ff8SMika Kuoppala stuck[i] ? "addr" : "seqno", 252205407ff8SMika Kuoppala stuck[i] ? ring->hangcheck.acthd & HEAD_ADDR : 252305407ff8SMika Kuoppala ring->hangcheck.seqno); 252405407ff8SMika Kuoppala } 252505407ff8SMika Kuoppala } 252605407ff8SMika Kuoppala 252705407ff8SMika Kuoppala if (rings_hung) 252805407ff8SMika Kuoppala return i915_handle_error(dev, true); 252905407ff8SMika Kuoppala 253005407ff8SMika Kuoppala if (busy_count) 253105407ff8SMika Kuoppala /* Reset timer case chip hangs without another request 253205407ff8SMika Kuoppala * being added */ 253399584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 253405407ff8SMika Kuoppala round_jiffies_up(jiffies + 253505407ff8SMika Kuoppala DRM_I915_HANGCHECK_JIFFIES)); 2536f65d9421SBen Gamari } 2537f65d9421SBen Gamari 253891738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 253991738a95SPaulo Zanoni { 254091738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 254191738a95SPaulo Zanoni 254291738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 254391738a95SPaulo Zanoni return; 254491738a95SPaulo Zanoni 254591738a95SPaulo Zanoni /* south display irq */ 254691738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 254791738a95SPaulo Zanoni /* 254891738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 254991738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 255091738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 255191738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR. 
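 * In other words SDEIER stays fully enabled from here on, and SDEIMR is the
 * single point of control for which PCH interrupts actually get delivered.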
255291738a95SPaulo Zanoni */ 255391738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 255491738a95SPaulo Zanoni POSTING_READ(SDEIER); 255591738a95SPaulo Zanoni } 255691738a95SPaulo Zanoni 2557c0e09200SDave Airlie /* drm_dma.h hooks 2558c0e09200SDave Airlie */ 2559f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2560036a4a7dSZhenyu Wang { 2561036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2562036a4a7dSZhenyu Wang 25634697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 25644697995bSJesse Barnes 2565036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2566bdfcdb63SDaniel Vetter 2567036a4a7dSZhenyu Wang /* XXX hotplug from PCH */ 2568036a4a7dSZhenyu Wang 2569036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2570036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 25713143a2bfSChris Wilson POSTING_READ(DEIER); 2572036a4a7dSZhenyu Wang 2573036a4a7dSZhenyu Wang /* and GT */ 2574036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2575036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 25763143a2bfSChris Wilson POSTING_READ(GTIER); 2577c650156aSZhenyu Wang 257891738a95SPaulo Zanoni ibx_irq_preinstall(dev); 25797d99163dSBen Widawsky } 25807d99163dSBen Widawsky 25817d99163dSBen Widawsky static void ivybridge_irq_preinstall(struct drm_device *dev) 25827d99163dSBen Widawsky { 25837d99163dSBen Widawsky drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 25847d99163dSBen Widawsky 25857d99163dSBen Widawsky atomic_set(&dev_priv->irq_received, 0); 25867d99163dSBen Widawsky 25877d99163dSBen Widawsky I915_WRITE(HWSTAM, 0xeffe); 25887d99163dSBen Widawsky 25897d99163dSBen Widawsky /* XXX hotplug from PCH */ 25907d99163dSBen Widawsky 25917d99163dSBen Widawsky I915_WRITE(DEIMR, 0xffffffff); 25927d99163dSBen Widawsky I915_WRITE(DEIER, 0x0); 25937d99163dSBen Widawsky POSTING_READ(DEIER); 25947d99163dSBen Widawsky 25957d99163dSBen Widawsky /* and GT */ 25967d99163dSBen Widawsky I915_WRITE(GTIMR, 0xffffffff); 25977d99163dSBen Widawsky I915_WRITE(GTIER, 0x0); 25987d99163dSBen Widawsky POSTING_READ(GTIER); 25997d99163dSBen Widawsky 2600eda63ffbSBen Widawsky /* Power management */ 2601eda63ffbSBen Widawsky I915_WRITE(GEN6_PMIMR, 0xffffffff); 2602eda63ffbSBen Widawsky I915_WRITE(GEN6_PMIER, 0x0); 2603eda63ffbSBen Widawsky POSTING_READ(GEN6_PMIER); 2604eda63ffbSBen Widawsky 260591738a95SPaulo Zanoni ibx_irq_preinstall(dev); 2606036a4a7dSZhenyu Wang } 2607036a4a7dSZhenyu Wang 26087e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 26097e231dbeSJesse Barnes { 26107e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26117e231dbeSJesse Barnes int pipe; 26127e231dbeSJesse Barnes 26137e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 26147e231dbeSJesse Barnes 26157e231dbeSJesse Barnes /* VLV magic */ 26167e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 26177e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 26187e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 26197e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 26207e231dbeSJesse Barnes 26217e231dbeSJesse Barnes /* and GT */ 26227e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 26237e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 26247e231dbeSJesse Barnes I915_WRITE(GTIMR, 0xffffffff); 26257e231dbeSJesse Barnes I915_WRITE(GTIER, 0x0); 26267e231dbeSJesse Barnes POSTING_READ(GTIER); 26277e231dbeSJesse Barnes 26287e231dbeSJesse Barnes 
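/* Clear any stale invalid-PTE status in DPINVGTT; these bits appear to be
 * write-one-to-clear, matching the DPINVGTT_STATUS_MASK ack done in the
 * (currently disabled) postinstall path further down.
 */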
I915_WRITE(DPINVGTT, 0xff); 26297e231dbeSJesse Barnes 26307e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 26317e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 26327e231dbeSJesse Barnes for_each_pipe(pipe) 26337e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 26347e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26357e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 26367e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 26377e231dbeSJesse Barnes POSTING_READ(VLV_IER); 26387e231dbeSJesse Barnes } 26397e231dbeSJesse Barnes 264082a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 264182a28bcfSDaniel Vetter { 264282a28bcfSDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 264382a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 264482a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 264582a28bcfSDaniel Vetter u32 mask = ~I915_READ(SDEIMR); 264682a28bcfSDaniel Vetter u32 hotplug; 264782a28bcfSDaniel Vetter 264882a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2649995e6b3dSEgbert Eich mask &= ~SDE_HOTPLUG_MASK; 265082a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2651cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 265282a28bcfSDaniel Vetter mask |= hpd_ibx[intel_encoder->hpd_pin]; 265382a28bcfSDaniel Vetter } else { 2654995e6b3dSEgbert Eich mask &= ~SDE_HOTPLUG_MASK_CPT; 265582a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2656cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 265782a28bcfSDaniel Vetter mask |= hpd_cpt[intel_encoder->hpd_pin]; 265882a28bcfSDaniel Vetter } 265982a28bcfSDaniel Vetter 266082a28bcfSDaniel Vetter I915_WRITE(SDEIMR, ~mask); 266182a28bcfSDaniel Vetter 26627fe0b973SKeith Packard /* 26637fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 26647fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 26657fe0b973SKeith Packard * 26667fe0b973SKeith Packard * This register is the same on all known PCH chips. 
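 * The read-modify-write below only touches the per-port enable and
 * pulse-duration fields and leaves the rest of PCH_PORT_HOTPLUG untouched.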
26677fe0b973SKeith Packard */ 26687fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 26697fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 26707fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 26717fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 26727fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 26737fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 26747fe0b973SKeith Packard } 26757fe0b973SKeith Packard 2676d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2677d46da437SPaulo Zanoni { 2678d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 267982a28bcfSDaniel Vetter u32 mask; 2680d46da437SPaulo Zanoni 2681692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2682692a04cfSDaniel Vetter return; 2683692a04cfSDaniel Vetter 26848664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 26858664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2686de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 26878664281bSPaulo Zanoni } else { 26888664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 26898664281bSPaulo Zanoni 26908664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 26918664281bSPaulo Zanoni } 2692ab5c608bSBen Widawsky 2693d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2694d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2695d46da437SPaulo Zanoni } 2696d46da437SPaulo Zanoni 2697f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2698036a4a7dSZhenyu Wang { 2699036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2700036a4a7dSZhenyu Wang /* enable kind of interrupts always enabled */ 2701013d5aa2SJesse Barnes u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2702ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 27038664281bSPaulo Zanoni DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2704de032bf4SPaulo Zanoni DE_PIPEA_FIFO_UNDERRUN | DE_POISON; 2705cc609d5dSBen Widawsky u32 gt_irqs; 2706036a4a7dSZhenyu Wang 27071ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2708036a4a7dSZhenyu Wang 2709036a4a7dSZhenyu Wang /* should always can generate irq */ 2710036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 27111ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 27121ec14ad3SChris Wilson I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 27133143a2bfSChris Wilson POSTING_READ(DEIER); 2714036a4a7dSZhenyu Wang 27151ec14ad3SChris Wilson dev_priv->gt_irq_mask = ~0; 2716036a4a7dSZhenyu Wang 2717036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 27181ec14ad3SChris Wilson I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2719881f47b6SXiang, Haihao 2720cc609d5dSBen Widawsky gt_irqs = GT_RENDER_USER_INTERRUPT; 2721cc609d5dSBen Widawsky 27221ec14ad3SChris Wilson if (IS_GEN6(dev)) 2723cc609d5dSBen Widawsky gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 27241ec14ad3SChris Wilson else 2725cc609d5dSBen Widawsky gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2726cc609d5dSBen Widawsky ILK_BSD_USER_INTERRUPT; 2727cc609d5dSBen Widawsky 2728cc609d5dSBen Widawsky I915_WRITE(GTIER, gt_irqs); 27293143a2bfSChris Wilson POSTING_READ(GTIER); 2730036a4a7dSZhenyu Wang 2731d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 27327fe0b973SKeith Packard 
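/* Ironlake-M additionally takes PCU events: clear any stale DE_PCU_EVENT
 * bit in DEIIR and unmask it below (presumably for the gen5 ips/rps work).
 */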
2733f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 2734f97108d1SJesse Barnes /* Clear & enable PCU event interrupts */ 2735f97108d1SJesse Barnes I915_WRITE(DEIIR, DE_PCU_EVENT); 2736f97108d1SJesse Barnes I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 2737f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2738f97108d1SJesse Barnes } 2739f97108d1SJesse Barnes 2740036a4a7dSZhenyu Wang return 0; 2741036a4a7dSZhenyu Wang } 2742036a4a7dSZhenyu Wang 2743f71d4af4SJesse Barnes static int ivybridge_irq_postinstall(struct drm_device *dev) 2744b1f14ad0SJesse Barnes { 2745b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2746b1f14ad0SJesse Barnes /* enable kind of interrupts always enabled */ 2747b615b57aSChris Wilson u32 display_mask = 2748b615b57aSChris Wilson DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2749b615b57aSChris Wilson DE_PLANEC_FLIP_DONE_IVB | 2750b615b57aSChris Wilson DE_PLANEB_FLIP_DONE_IVB | 2751ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE_IVB | 27528664281bSPaulo Zanoni DE_AUX_CHANNEL_A_IVB | 27538664281bSPaulo Zanoni DE_ERR_INT_IVB; 275412638c57SBen Widawsky u32 pm_irqs = GEN6_PM_RPS_EVENTS; 2755cc609d5dSBen Widawsky u32 gt_irqs; 2756b1f14ad0SJesse Barnes 2757b1f14ad0SJesse Barnes dev_priv->irq_mask = ~display_mask; 2758b1f14ad0SJesse Barnes 2759b1f14ad0SJesse Barnes /* should always can generate irq */ 27608664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2761b1f14ad0SJesse Barnes I915_WRITE(DEIIR, I915_READ(DEIIR)); 2762b1f14ad0SJesse Barnes I915_WRITE(DEIMR, dev_priv->irq_mask); 2763b615b57aSChris Wilson I915_WRITE(DEIER, 2764b615b57aSChris Wilson display_mask | 2765b615b57aSChris Wilson DE_PIPEC_VBLANK_IVB | 2766b615b57aSChris Wilson DE_PIPEB_VBLANK_IVB | 2767b615b57aSChris Wilson DE_PIPEA_VBLANK_IVB); 2768b1f14ad0SJesse Barnes POSTING_READ(DEIER); 2769b1f14ad0SJesse Barnes 2770cc609d5dSBen Widawsky dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2771b1f14ad0SJesse Barnes 2772b1f14ad0SJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2773b1f14ad0SJesse Barnes I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2774b1f14ad0SJesse Barnes 2775cc609d5dSBen Widawsky gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | 2776cc609d5dSBen Widawsky GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2777cc609d5dSBen Widawsky I915_WRITE(GTIER, gt_irqs); 2778b1f14ad0SJesse Barnes POSTING_READ(GTIER); 2779b1f14ad0SJesse Barnes 278012638c57SBen Widawsky I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 278112638c57SBen Widawsky if (HAS_VEBOX(dev)) 278212638c57SBen Widawsky pm_irqs |= PM_VEBOX_USER_INTERRUPT | 278312638c57SBen Widawsky PM_VEBOX_CS_ERROR_INTERRUPT; 278412638c57SBen Widawsky 278512638c57SBen Widawsky /* Our enable/disable rps functions may touch these registers so 278612638c57SBen Widawsky * make sure to set a known state for only the non-RPS bits. 278712638c57SBen Widawsky * The RMW is extra paranoia since this should be called after being set 278812638c57SBen Widawsky * to a known state in preinstall. 
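 * (GEN6_PMIIR was cleared just above, so no stale PM event can fire the
 * moment the enable bits below are written.)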
278912638c57SBen Widawsky * */ 279012638c57SBen Widawsky I915_WRITE(GEN6_PMIMR, 279112638c57SBen Widawsky (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); 279212638c57SBen Widawsky I915_WRITE(GEN6_PMIER, 279312638c57SBen Widawsky (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); 279412638c57SBen Widawsky POSTING_READ(GEN6_PMIER); 2795eda63ffbSBen Widawsky 2796d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 27977fe0b973SKeith Packard 2798b1f14ad0SJesse Barnes return 0; 2799b1f14ad0SJesse Barnes } 2800b1f14ad0SJesse Barnes 28017e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 28027e231dbeSJesse Barnes { 28037e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2804cc609d5dSBen Widawsky u32 gt_irqs; 28057e231dbeSJesse Barnes u32 enable_mask; 280631acc7f5SJesse Barnes u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 28077e231dbeSJesse Barnes 28087e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 280931acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 281031acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 281131acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 28127e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 28137e231dbeSJesse Barnes 281431acc7f5SJesse Barnes /* 281531acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 281631acc7f5SJesse Barnes * toggle them based on usage. 281731acc7f5SJesse Barnes */ 281831acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 281931acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 282031acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 28217e231dbeSJesse Barnes 282220afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 282320afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 282420afbda2SDaniel Vetter 28257e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 28267e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 28277e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28287e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 28297e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 28307e231dbeSJesse Barnes POSTING_READ(VLV_IER); 28317e231dbeSJesse Barnes 283231acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2833515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 283431acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 283531acc7f5SJesse Barnes 28367e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28377e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28387e231dbeSJesse Barnes 283931acc7f5SJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 284031acc7f5SJesse Barnes I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 28413bcedbe5SJesse Barnes 2842cc609d5dSBen Widawsky gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | 2843cc609d5dSBen Widawsky GT_BLT_USER_INTERRUPT; 2844cc609d5dSBen Widawsky I915_WRITE(GTIER, gt_irqs); 28457e231dbeSJesse Barnes POSTING_READ(GTIER); 28467e231dbeSJesse Barnes 28477e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 28487e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 28497e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 28507e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 28517e231dbeSJesse Barnes #endif 28527e231dbeSJesse Barnes 28537e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 285420afbda2SDaniel 
Vetter 285520afbda2SDaniel Vetter return 0; 285620afbda2SDaniel Vetter } 285720afbda2SDaniel Vetter 28587e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 28597e231dbeSJesse Barnes { 28607e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 28617e231dbeSJesse Barnes int pipe; 28627e231dbeSJesse Barnes 28637e231dbeSJesse Barnes if (!dev_priv) 28647e231dbeSJesse Barnes return; 28657e231dbeSJesse Barnes 2866ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2867ac4c16c5SEgbert Eich 28687e231dbeSJesse Barnes for_each_pipe(pipe) 28697e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 28707e231dbeSJesse Barnes 28717e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 28727e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 28737e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 28747e231dbeSJesse Barnes for_each_pipe(pipe) 28757e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 28767e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28777e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 28787e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 28797e231dbeSJesse Barnes POSTING_READ(VLV_IER); 28807e231dbeSJesse Barnes } 28817e231dbeSJesse Barnes 2882f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2883036a4a7dSZhenyu Wang { 2884036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 28854697995bSJesse Barnes 28864697995bSJesse Barnes if (!dev_priv) 28874697995bSJesse Barnes return; 28884697995bSJesse Barnes 2889ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2890ac4c16c5SEgbert Eich 2891036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2892036a4a7dSZhenyu Wang 2893036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2894036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2895036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 28968664281bSPaulo Zanoni if (IS_GEN7(dev)) 28978664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2898036a4a7dSZhenyu Wang 2899036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2900036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2901036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2902192aac1fSKeith Packard 2903ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2904ab5c608bSBen Widawsky return; 2905ab5c608bSBen Widawsky 2906192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2907192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2908192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 29098664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 29108664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2911036a4a7dSZhenyu Wang } 2912036a4a7dSZhenyu Wang 2913c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2914c2798b19SChris Wilson { 2915c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2916c2798b19SChris Wilson int pipe; 2917c2798b19SChris Wilson 2918c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2919c2798b19SChris Wilson 2920c2798b19SChris Wilson for_each_pipe(pipe) 2921c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2922c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2923c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2924c2798b19SChris Wilson POSTING_READ16(IER); 2925c2798b19SChris Wilson } 2926c2798b19SChris Wilson 2927c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 
2928c2798b19SChris Wilson { 2929c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2930c2798b19SChris Wilson 2931c2798b19SChris Wilson I915_WRITE16(EMR, 2932c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2933c2798b19SChris Wilson 2934c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2935c2798b19SChris Wilson dev_priv->irq_mask = 2936c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2937c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2938c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2939c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2940c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2941c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 2942c2798b19SChris Wilson 2943c2798b19SChris Wilson I915_WRITE16(IER, 2944c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2945c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2946c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2947c2798b19SChris Wilson I915_USER_INTERRUPT); 2948c2798b19SChris Wilson POSTING_READ16(IER); 2949c2798b19SChris Wilson 2950c2798b19SChris Wilson return 0; 2951c2798b19SChris Wilson } 2952c2798b19SChris Wilson 295390a72f87SVille Syrjälä /* 295490a72f87SVille Syrjälä * Returns true when a page flip has completed. 295590a72f87SVille Syrjälä */ 295690a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 295790a72f87SVille Syrjälä int pipe, u16 iir) 295890a72f87SVille Syrjälä { 295990a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 296090a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 296190a72f87SVille Syrjälä 296290a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 296390a72f87SVille Syrjälä return false; 296490a72f87SVille Syrjälä 296590a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 296690a72f87SVille Syrjälä return false; 296790a72f87SVille Syrjälä 296890a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 296990a72f87SVille Syrjälä 297090a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 297190a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 297290a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 297390a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 297490a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 
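 * In short: if ISR still shows the flip pending on this vblank, the flip has
 * not completed yet, so return false and check again on the next vblank.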
297590a72f87SVille Syrjälä */ 297690a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 297790a72f87SVille Syrjälä return false; 297890a72f87SVille Syrjälä 297990a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 298090a72f87SVille Syrjälä 298190a72f87SVille Syrjälä return true; 298290a72f87SVille Syrjälä } 298390a72f87SVille Syrjälä 2984ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2985c2798b19SChris Wilson { 2986c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2987c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2988c2798b19SChris Wilson u16 iir, new_iir; 2989c2798b19SChris Wilson u32 pipe_stats[2]; 2990c2798b19SChris Wilson unsigned long irqflags; 2991c2798b19SChris Wilson int irq_received; 2992c2798b19SChris Wilson int pipe; 2993c2798b19SChris Wilson u16 flip_mask = 2994c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2995c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2996c2798b19SChris Wilson 2997c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 2998c2798b19SChris Wilson 2999c2798b19SChris Wilson iir = I915_READ16(IIR); 3000c2798b19SChris Wilson if (iir == 0) 3001c2798b19SChris Wilson return IRQ_NONE; 3002c2798b19SChris Wilson 3003c2798b19SChris Wilson while (iir & ~flip_mask) { 3004c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3005c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 3006c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 3007c2798b19SChris Wilson * interrupts (for non-MSI). 3008c2798b19SChris Wilson */ 3009c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3010c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3011c2798b19SChris Wilson i915_handle_error(dev, false); 3012c2798b19SChris Wilson 3013c2798b19SChris Wilson for_each_pipe(pipe) { 3014c2798b19SChris Wilson int reg = PIPESTAT(pipe); 3015c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3016c2798b19SChris Wilson 3017c2798b19SChris Wilson /* 3018c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 3019c2798b19SChris Wilson */ 3020c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3021c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3022c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3023c2798b19SChris Wilson pipe_name(pipe)); 3024c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 3025c2798b19SChris Wilson irq_received = 1; 3026c2798b19SChris Wilson } 3027c2798b19SChris Wilson } 3028c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3029c2798b19SChris Wilson 3030c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 3031c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 3032c2798b19SChris Wilson 3033d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 3034c2798b19SChris Wilson 3035c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 3036c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3037c2798b19SChris Wilson 3038c2798b19SChris Wilson if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 303990a72f87SVille Syrjälä i8xx_handle_vblank(dev, 0, iir)) 304090a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 3041c2798b19SChris Wilson 3042c2798b19SChris Wilson if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 304390a72f87SVille Syrjälä i8xx_handle_vblank(dev, 1, 
iir)) 304490a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 3045c2798b19SChris Wilson 3046c2798b19SChris Wilson iir = new_iir; 3047c2798b19SChris Wilson } 3048c2798b19SChris Wilson 3049c2798b19SChris Wilson return IRQ_HANDLED; 3050c2798b19SChris Wilson } 3051c2798b19SChris Wilson 3052c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 3053c2798b19SChris Wilson { 3054c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3055c2798b19SChris Wilson int pipe; 3056c2798b19SChris Wilson 3057c2798b19SChris Wilson for_each_pipe(pipe) { 3058c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 3059c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3060c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3061c2798b19SChris Wilson } 3062c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 3063c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 3064c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 3065c2798b19SChris Wilson } 3066c2798b19SChris Wilson 3067a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 3068a266c7d5SChris Wilson { 3069a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3070a266c7d5SChris Wilson int pipe; 3071a266c7d5SChris Wilson 3072a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3073a266c7d5SChris Wilson 3074a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3075a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3076a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3077a266c7d5SChris Wilson } 3078a266c7d5SChris Wilson 307900d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xeffe); 3080a266c7d5SChris Wilson for_each_pipe(pipe) 3081a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3082a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3083a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3084a266c7d5SChris Wilson POSTING_READ(IER); 3085a266c7d5SChris Wilson } 3086a266c7d5SChris Wilson 3087a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 3088a266c7d5SChris Wilson { 3089a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 309038bde180SChris Wilson u32 enable_mask; 3091a266c7d5SChris Wilson 309238bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 309338bde180SChris Wilson 309438bde180SChris Wilson /* Unmask the interrupts that we always want on. 
*/ 309538bde180SChris Wilson dev_priv->irq_mask = 309638bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 309738bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 309838bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 309938bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 310038bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 310138bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 310238bde180SChris Wilson 310338bde180SChris Wilson enable_mask = 310438bde180SChris Wilson I915_ASLE_INTERRUPT | 310538bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 310638bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 310738bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 310838bde180SChris Wilson I915_USER_INTERRUPT; 310938bde180SChris Wilson 3110a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 311120afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 311220afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 311320afbda2SDaniel Vetter 3114a266c7d5SChris Wilson /* Enable in IER... */ 3115a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3116a266c7d5SChris Wilson /* and unmask in IMR */ 3117a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3118a266c7d5SChris Wilson } 3119a266c7d5SChris Wilson 3120a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 3121a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 3122a266c7d5SChris Wilson POSTING_READ(IER); 3123a266c7d5SChris Wilson 3124f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 312520afbda2SDaniel Vetter 312620afbda2SDaniel Vetter return 0; 312720afbda2SDaniel Vetter } 312820afbda2SDaniel Vetter 312990a72f87SVille Syrjälä /* 313090a72f87SVille Syrjälä * Returns true when a page flip has completed. 313190a72f87SVille Syrjälä */ 313290a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 313390a72f87SVille Syrjälä int plane, int pipe, u32 iir) 313490a72f87SVille Syrjälä { 313590a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 313690a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 313790a72f87SVille Syrjälä 313890a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 313990a72f87SVille Syrjälä return false; 314090a72f87SVille Syrjälä 314190a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 314290a72f87SVille Syrjälä return false; 314390a72f87SVille Syrjälä 314490a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 314590a72f87SVille Syrjälä 314690a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 314790a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 314890a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 314990a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 315090a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 
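 * (Same scheme as the i8xx handler above, just using the 32-bit ISR read.)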
315190a72f87SVille Syrjälä */ 315290a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 315390a72f87SVille Syrjälä return false; 315490a72f87SVille Syrjälä 315590a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 315690a72f87SVille Syrjälä 315790a72f87SVille Syrjälä return true; 315890a72f87SVille Syrjälä } 315990a72f87SVille Syrjälä 3160ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 3161a266c7d5SChris Wilson { 3162a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3163a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 31648291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3165a266c7d5SChris Wilson unsigned long irqflags; 316638bde180SChris Wilson u32 flip_mask = 316738bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 316838bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 316938bde180SChris Wilson int pipe, ret = IRQ_NONE; 3170a266c7d5SChris Wilson 3171a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 3172a266c7d5SChris Wilson 3173a266c7d5SChris Wilson iir = I915_READ(IIR); 317438bde180SChris Wilson do { 317538bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 31768291ee90SChris Wilson bool blc_event = false; 3177a266c7d5SChris Wilson 3178a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3179a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 3180a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 3181a266c7d5SChris Wilson * interrupts (for non-MSI). 3182a266c7d5SChris Wilson */ 3183a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3184a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3185a266c7d5SChris Wilson i915_handle_error(dev, false); 3186a266c7d5SChris Wilson 3187a266c7d5SChris Wilson for_each_pipe(pipe) { 3188a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 3189a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3190a266c7d5SChris Wilson 319138bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 3192a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3193a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3194a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3195a266c7d5SChris Wilson pipe_name(pipe)); 3196a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 319738bde180SChris Wilson irq_received = true; 3198a266c7d5SChris Wilson } 3199a266c7d5SChris Wilson } 3200a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3201a266c7d5SChris Wilson 3202a266c7d5SChris Wilson if (!irq_received) 3203a266c7d5SChris Wilson break; 3204a266c7d5SChris Wilson 3205a266c7d5SChris Wilson /* Consume port. 
Then clear IIR or we'll miss events */ 3206a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 3207a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3208a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3209b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3210a266c7d5SChris Wilson 3211a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3212a266c7d5SChris Wilson hotplug_status); 3213b543fb04SEgbert Eich if (hotplug_trigger) { 3214cd569aedSEgbert Eich if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915)) 3215cd569aedSEgbert Eich i915_hpd_irq_setup(dev); 3216a266c7d5SChris Wilson queue_work(dev_priv->wq, 3217a266c7d5SChris Wilson &dev_priv->hotplug_work); 3218b543fb04SEgbert Eich } 3219a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 322038bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 3221a266c7d5SChris Wilson } 3222a266c7d5SChris Wilson 322338bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 3224a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3225a266c7d5SChris Wilson 3226a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3227a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3228a266c7d5SChris Wilson 3229a266c7d5SChris Wilson for_each_pipe(pipe) { 323038bde180SChris Wilson int plane = pipe; 323138bde180SChris Wilson if (IS_MOBILE(dev)) 323238bde180SChris Wilson plane = !plane; 32335e2032d4SVille Syrjälä 323490a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 323590a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 323690a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3237a266c7d5SChris Wilson 3238a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3239a266c7d5SChris Wilson blc_event = true; 3240a266c7d5SChris Wilson } 3241a266c7d5SChris Wilson 3242a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3243a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3244a266c7d5SChris Wilson 3245a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3246a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3247a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3248a266c7d5SChris Wilson * we would never get another interrupt. 3249a266c7d5SChris Wilson * 3250a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3251a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3252a266c7d5SChris Wilson * another one. 3253a266c7d5SChris Wilson * 3254a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3255a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3256a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3257a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3258a266c7d5SChris Wilson * stray interrupts. 
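 * Hence the surrounding do/while loop: IIR is re-read after the write and we
 * keep iterating until no bits outside flip_mask remain set.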
3259a266c7d5SChris Wilson */ 326038bde180SChris Wilson ret = IRQ_HANDLED; 3261a266c7d5SChris Wilson iir = new_iir; 326238bde180SChris Wilson } while (iir & ~flip_mask); 3263a266c7d5SChris Wilson 3264d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 32658291ee90SChris Wilson 3266a266c7d5SChris Wilson return ret; 3267a266c7d5SChris Wilson } 3268a266c7d5SChris Wilson 3269a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 3270a266c7d5SChris Wilson { 3271a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3272a266c7d5SChris Wilson int pipe; 3273a266c7d5SChris Wilson 3274ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3275ac4c16c5SEgbert Eich 3276a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3277a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3278a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3279a266c7d5SChris Wilson } 3280a266c7d5SChris Wilson 328100d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 328255b39755SChris Wilson for_each_pipe(pipe) { 328355b39755SChris Wilson /* Clear enable bits; then clear status bits */ 3284a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 328555b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 328655b39755SChris Wilson } 3287a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3288a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3289a266c7d5SChris Wilson 3290a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3291a266c7d5SChris Wilson } 3292a266c7d5SChris Wilson 3293a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 3294a266c7d5SChris Wilson { 3295a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3296a266c7d5SChris Wilson int pipe; 3297a266c7d5SChris Wilson 3298a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3299a266c7d5SChris Wilson 3300a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3301a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3302a266c7d5SChris Wilson 3303a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 3304a266c7d5SChris Wilson for_each_pipe(pipe) 3305a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3306a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3307a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3308a266c7d5SChris Wilson POSTING_READ(IER); 3309a266c7d5SChris Wilson } 3310a266c7d5SChris Wilson 3311a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 3312a266c7d5SChris Wilson { 3313a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3314bbba0a97SChris Wilson u32 enable_mask; 3315a266c7d5SChris Wilson u32 error_mask; 3316a266c7d5SChris Wilson 3317a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. 
*/ 3318bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3319adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 3320bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3321bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3322bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3323bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3324bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3325bbba0a97SChris Wilson 3326bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 332721ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 332821ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3329bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 3330bbba0a97SChris Wilson 3331bbba0a97SChris Wilson if (IS_G4X(dev)) 3332bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 3333a266c7d5SChris Wilson 3334515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 3335a266c7d5SChris Wilson 3336a266c7d5SChris Wilson /* 3337a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 3338a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 3339a266c7d5SChris Wilson */ 3340a266c7d5SChris Wilson if (IS_G4X(dev)) { 3341a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 3342a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 3343a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 3344a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 3345a266c7d5SChris Wilson } else { 3346a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 3347a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 3348a266c7d5SChris Wilson } 3349a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 3350a266c7d5SChris Wilson 3351a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 3352a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 3353a266c7d5SChris Wilson POSTING_READ(IER); 3354a266c7d5SChris Wilson 335520afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 335620afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 335720afbda2SDaniel Vetter 3358f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 335920afbda2SDaniel Vetter 336020afbda2SDaniel Vetter return 0; 336120afbda2SDaniel Vetter } 336220afbda2SDaniel Vetter 3363bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 336420afbda2SDaniel Vetter { 336520afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3366e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3367cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 336820afbda2SDaniel Vetter u32 hotplug_en; 336920afbda2SDaniel Vetter 3370bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 3371bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3372bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3373adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 3374e5868a31SEgbert Eich /* enable bits are the same for all generations */ 3375cd569aedSEgbert Eich list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3376cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3377cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3378a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 3379a266c7d5SChris Wilson to generate a spurious hotplug event about three 3380a266c7d5SChris Wilson seconds later. So just do it once. 
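	   (The G4X parts below also get a 64-sample activation period; all
	   parts use the CRT_HOTPLUG_VOLTAGE_COMPARE_50 threshold.)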
                /* Programming the CRT detection parameters tends
                   to generate a spurious hotplug event about three
                   seconds later.  So just do it once.
                */
                if (IS_G4X(dev))
                        hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
                hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

                /* Ignore TV since it's buggy */
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
        int irq_received;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);

        for (;;) {
                bool blc_event = false;

                irq_received = (iir & ~flip_mask) != 0;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = 1;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
                                                                HOTPLUG_INT_STATUS_G4X :
                                                                HOTPLUG_INT_STATUS_I965);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
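                        /*
                         * If hotplug_irq_storm_detect() reports a storm, the
                         * hotplug enables are reprogrammed below so that any
                         * pin it disabled is dropped from PORT_HOTPLUG_EN;
                         * the hotplug re-enable timer restores it later.
                         */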
                        if (hotplug_trigger) {
                                if (hotplug_irq_storm_detect(dev, hotplug_trigger,
                                                             IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965))
                                        i915_hpd_irq_setup(dev);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
                        }
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[VCS]);

                for_each_pipe(pipe) {
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                            i915_handle_vblank(dev, pipe, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read.  This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
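                /*
                 * Re-run the loop with the freshly sampled IIR value; the
                 * loop exits near the top once neither IIR (ignoring the
                 * flip-pending bits) nor any PIPESTAT register reports
                 * anything new.
                 */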
                iir = new_iir;
        }

        i915_update_dri1_breadcrumb(dev);

        return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        del_timer_sync(&dev_priv->hotplug_reenable_timer);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xffffffff);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe),
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
        struct drm_device *dev = dev_priv->dev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        unsigned long irqflags;
        int i;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
                struct drm_connector *connector;

                if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
                        continue;

                dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

                list_for_each_entry(connector, &mode_config->connector_list, head) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);

                        if (intel_connector->encoder->hpd_pin == i) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         drm_get_connector_name(connector));
                                connector->polled = intel_connector->polled;
                                if (!connector->polled)
                                        connector->polled = DRM_CONNECTOR_POLL_HPD;
                        }
                }
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

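/*
 * intel_irq_init - set up interrupt support before interrupts are enabled
 *
 * Initializes the work items and timers used by the interrupt code (hotplug,
 * GPU error handling, RPS, L3 parity, hangcheck, hotplug re-enable) and fills
 * in the drm_driver irq/vblank hooks plus display.hpd_irq_setup according to
 * the hardware generation.
 */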
void intel_irq_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);
        setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
                    (unsigned long) dev_priv);

        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        else
                dev->driver->get_vblank_timestamp = NULL;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
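
        /*
         * Pick the per-platform IRQ entry points.  Ivybridge/Haswell must be
         * matched before the generic HAS_PCH_SPLIT() branch, which would
         * otherwise also claim them; everything that falls through to the
         * final else uses the legacy gen2/gen3/gen4 paths and shares the
         * i915 vblank hooks.
         */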
        if (IS_VALLEYVIEW(dev)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                /* Share uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ivybridge_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else {
                if (INTEL_INFO(dev)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
                } else if (INTEL_INFO(dev)->gen == 3) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                } else {
                        dev->driver->irq_preinstall = i965_irq_preinstall;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                }
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
        }
}

void intel_hpd_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
        int i;

        for (i = 1; i < HPD_NUM_PINS; i++) {
                dev_priv->hpd_stats[i].hpd_cnt = 0;
                dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
        }
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                struct intel_connector *intel_connector = to_intel_connector(connector);
                connector->polled = intel_connector->polled;
                if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
                        connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
}
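
/*
 * Illustrative call ordering only (a sketch of how these entry points are
 * typically used from the driver load path; not code taken from this file):
 *
 *      intel_irq_init(dev);    // choose per-platform hooks, init work/timers
 *      drm_irq_install(dev);   // runs the irq_preinstall/irq_postinstall hooks
 *      intel_hpd_init(dev);    // seed HPD state and program the hotplug enables
 */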