/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

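/* Like the enable helper above, this only touches the DEIMR hardware register
 * when the cached irq_mask actually changes.
 */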
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

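/* IBX has a dedicated SDEIMR underrun bit per transcoder, so reporting can be
 * toggled for one transcoder without consulting the state of the others.
 */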
static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

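/* In the PIPESTAT registers each enable bit in the high word controls the
 * status bit sixteen positions below it, so writing mask | (mask >> 16)
 * enables the interrupt and clears any status that was already pending.
 */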
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

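/* Sample the current scanout position of a pipe.  Gen4+ has a scan line
 * register (PIPEDSL) that gives the vertical position directly; older parts
 * only expose a frame/pixel counter, so the vertical and horizontal positions
 * are derived from the pixel count and htotal.
 */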
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

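/* Wake anyone waiting for a seqno on this ring and push the hangcheck timer
 * out, since the interrupt shows the GPU is still making progress.
 */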
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
As a consequence of 791e3689190SBen Widawsky * this event, userspace should try to remap the bad rows since statistically 792e3689190SBen Widawsky * it is likely the same row is more likely to go bad again. 793e3689190SBen Widawsky */ 794e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work) 795e3689190SBen Widawsky { 796e3689190SBen Widawsky drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 797a4da4fa4SDaniel Vetter l3_parity.error_work); 798e3689190SBen Widawsky u32 error_status, row, bank, subbank; 799e3689190SBen Widawsky char *parity_event[5]; 800e3689190SBen Widawsky uint32_t misccpctl; 801e3689190SBen Widawsky unsigned long flags; 802e3689190SBen Widawsky 803e3689190SBen Widawsky /* We must turn off DOP level clock gating to access the L3 registers. 804e3689190SBen Widawsky * In order to prevent a get/put style interface, acquire struct mutex 805e3689190SBen Widawsky * any time we access those registers. 806e3689190SBen Widawsky */ 807e3689190SBen Widawsky mutex_lock(&dev_priv->dev->struct_mutex); 808e3689190SBen Widawsky 809e3689190SBen Widawsky misccpctl = I915_READ(GEN7_MISCCPCTL); 810e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 811e3689190SBen Widawsky POSTING_READ(GEN7_MISCCPCTL); 812e3689190SBen Widawsky 813e3689190SBen Widawsky error_status = I915_READ(GEN7_L3CDERRST1); 814e3689190SBen Widawsky row = GEN7_PARITY_ERROR_ROW(error_status); 815e3689190SBen Widawsky bank = GEN7_PARITY_ERROR_BANK(error_status); 816e3689190SBen Widawsky subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 817e3689190SBen Widawsky 818e3689190SBen Widawsky I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 819e3689190SBen Widawsky GEN7_L3CDERRST1_ENABLE); 820e3689190SBen Widawsky POSTING_READ(GEN7_L3CDERRST1); 821e3689190SBen Widawsky 822e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl); 823e3689190SBen Widawsky 824e3689190SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, flags); 825cc609d5dSBen Widawsky dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 826e3689190SBen Widawsky I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 827e3689190SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 828e3689190SBen Widawsky 829e3689190SBen Widawsky mutex_unlock(&dev_priv->dev->struct_mutex); 830e3689190SBen Widawsky 831e3689190SBen Widawsky parity_event[0] = "L3_PARITY_ERROR=1"; 832e3689190SBen Widawsky parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 833e3689190SBen Widawsky parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 834e3689190SBen Widawsky parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 835e3689190SBen Widawsky parity_event[4] = NULL; 836e3689190SBen Widawsky 837e3689190SBen Widawsky kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, 838e3689190SBen Widawsky KOBJ_CHANGE, parity_event); 839e3689190SBen Widawsky 840e3689190SBen Widawsky DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 841e3689190SBen Widawsky row, bank, subbank); 842e3689190SBen Widawsky 843e3689190SBen Widawsky kfree(parity_event[3]); 844e3689190SBen Widawsky kfree(parity_event[2]); 845e3689190SBen Widawsky kfree(parity_event[1]); 846e3689190SBen Widawsky } 847e3689190SBen Widawsky 848d2ba8470SDaniel Vetter static void ivybridge_handle_parity_error(struct drm_device *dev) 849e3689190SBen Widawsky { 850e3689190SBen Widawsky drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 851e3689190SBen Widawsky unsigned long flags; 852e3689190SBen Widawsky 
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
interrupt\n"); 1107776ad806SJesse Barnes 1108776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 1109776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1110776ad806SJesse Barnes 1111776ad806SJesse Barnes if (pch_iir & SDE_POISON) 1112776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 1113776ad806SJesse Barnes 11149db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 11159db4a9c7SJesse Barnes for_each_pipe(pipe) 11169db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 11179db4a9c7SJesse Barnes pipe_name(pipe), 11189db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1119776ad806SJesse Barnes 1120776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1121776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1122776ad806SJesse Barnes 1123776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1124776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1125776ad806SJesse Barnes 1126776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 11278664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 11288664281bSPaulo Zanoni false)) 11298664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 11308664281bSPaulo Zanoni 11318664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 11328664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 11338664281bSPaulo Zanoni false)) 11348664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 11358664281bSPaulo Zanoni } 11368664281bSPaulo Zanoni 11378664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 11388664281bSPaulo Zanoni { 11398664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 11408664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 11418664281bSPaulo Zanoni 1142de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1143de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1144de032bf4SPaulo Zanoni 11458664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_A) 11468664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 11478664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 11488664281bSPaulo Zanoni 11498664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_B) 11508664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 11518664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 11528664281bSPaulo Zanoni 11538664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_C) 11548664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 11558664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 11568664281bSPaulo Zanoni 11578664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 11588664281bSPaulo Zanoni } 11598664281bSPaulo Zanoni 11608664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 11618664281bSPaulo Zanoni { 11628664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 11638664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 11648664281bSPaulo Zanoni 1165de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1166de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1167de032bf4SPaulo Zanoni 11688664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 11698664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 11708664281bSPaulo Zanoni false)) 
11718664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 11728664281bSPaulo Zanoni 11738664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 11748664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 11758664281bSPaulo Zanoni false)) 11768664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 11778664281bSPaulo Zanoni 11788664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 11798664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 11808664281bSPaulo Zanoni false)) 11818664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 11828664281bSPaulo Zanoni 11838664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1184776ad806SJesse Barnes } 1185776ad806SJesse Barnes 118623e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 118723e81d69SAdam Jackson { 118823e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 118923e81d69SAdam Jackson int pipe; 1190b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 119123e81d69SAdam Jackson 119210a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 119391d131d2SDaniel Vetter 1194cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1195cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 119623e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1197cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1198cfc33bf7SVille Syrjälä port_name(port)); 1199cfc33bf7SVille Syrjälä } 120023e81d69SAdam Jackson 120123e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1202ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 120323e81d69SAdam Jackson 120423e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1205515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 120623e81d69SAdam Jackson 120723e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 120823e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 120923e81d69SAdam Jackson 121023e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 121123e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 121223e81d69SAdam Jackson 121323e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 121423e81d69SAdam Jackson for_each_pipe(pipe) 121523e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 121623e81d69SAdam Jackson pipe_name(pipe), 121723e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 12188664281bSPaulo Zanoni 12198664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 12208664281bSPaulo Zanoni cpt_serr_int_handler(dev); 122123e81d69SAdam Jackson } 122223e81d69SAdam Jackson 1223ff1f525eSDaniel Vetter static irqreturn_t ivybridge_irq_handler(int irq, void *arg) 1224b1f14ad0SJesse Barnes { 1225b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1226b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1227ab5c608bSBen Widawsky u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; 12280e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 12290e43406bSChris Wilson int i; 1230b1f14ad0SJesse Barnes 1231b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1232b1f14ad0SJesse Barnes 12338664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 12348664281bSPaulo Zanoni * do any I915_{READ,WRITE}. 
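* Writing FPGA_DBG_RM_NOCLAIM back below clears the stale flag, so any
* unclaimed access reported afterwards can be attributed to this handler
* rather than to an earlier access.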
*/ 12358664281bSPaulo Zanoni if (IS_HASWELL(dev) && 12368664281bSPaulo Zanoni (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 12378664281bSPaulo Zanoni DRM_ERROR("Unclaimed register before interrupt\n"); 12388664281bSPaulo Zanoni I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 12398664281bSPaulo Zanoni } 12408664281bSPaulo Zanoni 1241b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1242b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1243b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 12440e43406bSChris Wilson 124544498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 124644498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 124744498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 124844498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 124944498aeaSPaulo Zanoni * due to its back queue). */ 1250ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 125144498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 125244498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 125344498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1254ab5c608bSBen Widawsky } 125544498aeaSPaulo Zanoni 12568664281bSPaulo Zanoni /* On Haswell, also mask ERR_INT because we don't want to risk 12578664281bSPaulo Zanoni * generating "unclaimed register" interrupts from inside the interrupt 12588664281bSPaulo Zanoni * handler. */ 12594bc9d430SDaniel Vetter if (IS_HASWELL(dev)) { 12604bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 12618664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 12624bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 12634bc9d430SDaniel Vetter } 12648664281bSPaulo Zanoni 12650e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 12660e43406bSChris Wilson if (gt_iir) { 12670e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 12680e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 12690e43406bSChris Wilson ret = IRQ_HANDLED; 12700e43406bSChris Wilson } 1271b1f14ad0SJesse Barnes 1272b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 12730e43406bSChris Wilson if (de_iir) { 12748664281bSPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 12758664281bSPaulo Zanoni ivb_err_int_handler(dev); 12768664281bSPaulo Zanoni 1277ce99c256SDaniel Vetter if (de_iir & DE_AUX_CHANNEL_A_IVB) 1278ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1279ce99c256SDaniel Vetter 1280b1f14ad0SJesse Barnes if (de_iir & DE_GSE_IVB) 128181a07809SJani Nikula intel_opregion_asle_intr(dev); 1282b1f14ad0SJesse Barnes 12830e43406bSChris Wilson for (i = 0; i < 3; i++) { 128474d44445SDaniel Vetter if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 128574d44445SDaniel Vetter drm_handle_vblank(dev, i); 12860e43406bSChris Wilson if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 12870e43406bSChris Wilson intel_prepare_page_flip(dev, i); 12880e43406bSChris Wilson intel_finish_page_flip_plane(dev, i); 1289b1f14ad0SJesse Barnes } 1290b1f14ad0SJesse Barnes } 1291b1f14ad0SJesse Barnes 1292b1f14ad0SJesse Barnes /* check event from PCH */ 1293ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 12940e43406bSChris Wilson u32 pch_iir = I915_READ(SDEIIR); 12950e43406bSChris Wilson 129623e81d69SAdam Jackson cpt_irq_handler(dev, pch_iir); 12970e43406bSChris Wilson 12980e43406bSChris Wilson /* clear PCH hotplug event before clear CPU irq */ 12990e43406bSChris Wilson I915_WRITE(SDEIIR, pch_iir); 1300b1f14ad0SJesse Barnes }
1301b1f14ad0SJesse Barnes 13020e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 13030e43406bSChris Wilson ret = IRQ_HANDLED; 13040e43406bSChris Wilson } 13050e43406bSChris Wilson 13060e43406bSChris Wilson pm_iir = I915_READ(GEN6_PMIIR); 13070e43406bSChris Wilson if (pm_iir) { 1308baf02a1fSBen Widawsky if (IS_HASWELL(dev)) 1309baf02a1fSBen Widawsky hsw_pm_irq_handler(dev_priv, pm_iir); 13104848405cSBen Widawsky else if (pm_iir & GEN6_PM_RPS_EVENTS) 1311fc6826d1SChris Wilson gen6_queue_rps_work(dev_priv, pm_iir); 1312b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 13130e43406bSChris Wilson ret = IRQ_HANDLED; 13140e43406bSChris Wilson } 1315b1f14ad0SJesse Barnes 13164bc9d430SDaniel Vetter if (IS_HASWELL(dev)) { 13174bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 13184bc9d430SDaniel Vetter if (ivb_can_enable_err_int(dev)) 13198664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 13204bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 13214bc9d430SDaniel Vetter } 13228664281bSPaulo Zanoni 1323b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1324b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1325ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 132644498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 132744498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1328ab5c608bSBen Widawsky } 1329b1f14ad0SJesse Barnes 1330b1f14ad0SJesse Barnes return ret; 1331b1f14ad0SJesse Barnes } 1332b1f14ad0SJesse Barnes 1333e7b4c6b1SDaniel Vetter static void ilk_gt_irq_handler(struct drm_device *dev, 1334e7b4c6b1SDaniel Vetter struct drm_i915_private *dev_priv, 1335e7b4c6b1SDaniel Vetter u32 gt_iir) 1336e7b4c6b1SDaniel Vetter { 1337cc609d5dSBen Widawsky if (gt_iir & 1338cc609d5dSBen Widawsky (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1339e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[RCS]); 1340cc609d5dSBen Widawsky if (gt_iir & ILK_BSD_USER_INTERRUPT) 1341e7b4c6b1SDaniel Vetter notify_ring(dev, &dev_priv->ring[VCS]); 1342e7b4c6b1SDaniel Vetter } 1343e7b4c6b1SDaniel Vetter 1344ff1f525eSDaniel Vetter static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1345036a4a7dSZhenyu Wang { 13464697995bSJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1347036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1348036a4a7dSZhenyu Wang int ret = IRQ_NONE; 134944498aeaSPaulo Zanoni u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; 1350881f47b6SXiang, Haihao 13514697995bSJesse Barnes atomic_inc(&dev_priv->irq_received); 13524697995bSJesse Barnes 13532d109a84SZou, Nanhai /* disable master interrupt before clearing iir */ 13542d109a84SZou, Nanhai de_ier = I915_READ(DEIER); 13552d109a84SZou, Nanhai I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 13563143a2bfSChris Wilson POSTING_READ(DEIER); 13572d109a84SZou, Nanhai 135844498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 135944498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 136044498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 136144498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 136244498aeaSPaulo Zanoni * due to its back queue).
*/ 136344498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 136444498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 136544498aeaSPaulo Zanoni POSTING_READ(SDEIER); 136644498aeaSPaulo Zanoni 1367036a4a7dSZhenyu Wang de_iir = I915_READ(DEIIR); 1368036a4a7dSZhenyu Wang gt_iir = I915_READ(GTIIR); 13693b8d8d91SJesse Barnes pm_iir = I915_READ(GEN6_PMIIR); 1370036a4a7dSZhenyu Wang 1371acd15b6cSDaniel Vetter if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) 1372c7c85101SZou Nan hai goto done; 1373036a4a7dSZhenyu Wang 1374036a4a7dSZhenyu Wang ret = IRQ_HANDLED; 1375036a4a7dSZhenyu Wang 1376e7b4c6b1SDaniel Vetter if (IS_GEN5(dev)) 1377e7b4c6b1SDaniel Vetter ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1378e7b4c6b1SDaniel Vetter else 1379e7b4c6b1SDaniel Vetter snb_gt_irq_handler(dev, dev_priv, gt_iir); 1380036a4a7dSZhenyu Wang 1381ce99c256SDaniel Vetter if (de_iir & DE_AUX_CHANNEL_A) 1382ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 1383ce99c256SDaniel Vetter 138401c66889SZhao Yakui if (de_iir & DE_GSE) 138581a07809SJani Nikula intel_opregion_asle_intr(dev); 138601c66889SZhao Yakui 138774d44445SDaniel Vetter if (de_iir & DE_PIPEA_VBLANK) 138874d44445SDaniel Vetter drm_handle_vblank(dev, 0); 138974d44445SDaniel Vetter 139074d44445SDaniel Vetter if (de_iir & DE_PIPEB_VBLANK) 139174d44445SDaniel Vetter drm_handle_vblank(dev, 1); 139274d44445SDaniel Vetter 1393de032bf4SPaulo Zanoni if (de_iir & DE_POISON) 1394de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1395de032bf4SPaulo Zanoni 13968664281bSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 13978664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 13988664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 13998664281bSPaulo Zanoni 14008664281bSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 14018664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 14028664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 14038664281bSPaulo Zanoni 1404f072d2e7SZhenyu Wang if (de_iir & DE_PLANEA_FLIP_DONE) { 1405013d5aa2SJesse Barnes intel_prepare_page_flip(dev, 0); 14062bbda389SChris Wilson intel_finish_page_flip_plane(dev, 0); 1407013d5aa2SJesse Barnes } 1408013d5aa2SJesse Barnes 1409f072d2e7SZhenyu Wang if (de_iir & DE_PLANEB_FLIP_DONE) { 1410f072d2e7SZhenyu Wang intel_prepare_page_flip(dev, 1); 14112bbda389SChris Wilson intel_finish_page_flip_plane(dev, 1); 1412013d5aa2SJesse Barnes } 1413c062df61SLi Peng 1414c650156aSZhenyu Wang /* check event from PCH */ 1415776ad806SJesse Barnes if (de_iir & DE_PCH_EVENT) { 1416acd15b6cSDaniel Vetter u32 pch_iir = I915_READ(SDEIIR); 1417acd15b6cSDaniel Vetter 141823e81d69SAdam Jackson if (HAS_PCH_CPT(dev)) 141923e81d69SAdam Jackson cpt_irq_handler(dev, pch_iir); 142023e81d69SAdam Jackson else 142123e81d69SAdam Jackson ibx_irq_handler(dev, pch_iir); 1422acd15b6cSDaniel Vetter 1423acd15b6cSDaniel Vetter /* should clear PCH hotplug event before clear CPU irq */ 1424acd15b6cSDaniel Vetter I915_WRITE(SDEIIR, pch_iir); 1425776ad806SJesse Barnes } 1426c650156aSZhenyu Wang 142773edd18fSDaniel Vetter if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 142873edd18fSDaniel Vetter ironlake_handle_rps_change(dev); 1429f97108d1SJesse Barnes 14304848405cSBen Widawsky if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) 1431fc6826d1SChris Wilson gen6_queue_rps_work(dev_priv, pm_iir); 14323b8d8d91SJesse Barnes 1433c7c85101SZou Nan hai I915_WRITE(GTIIR, gt_iir); 1434c7c85101SZou Nan hai I915_WRITE(DEIIR, de_iir); 14354912d041SBen Widawsky I915_WRITE(GEN6_PMIIR, 
pm_iir); 1436036a4a7dSZhenyu Wang 1437c7c85101SZou Nan hai done: 14382d109a84SZou, Nanhai I915_WRITE(DEIER, de_ier); 14393143a2bfSChris Wilson POSTING_READ(DEIER); 144044498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 144144498aeaSPaulo Zanoni POSTING_READ(SDEIER); 14422d109a84SZou, Nanhai 1443036a4a7dSZhenyu Wang return ret; 1444036a4a7dSZhenyu Wang } 1445036a4a7dSZhenyu Wang 14468a905236SJesse Barnes /** 14478a905236SJesse Barnes * i915_error_work_func - do process context error handling work 14488a905236SJesse Barnes * @work: work struct 14498a905236SJesse Barnes * 14508a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 14518a905236SJesse Barnes * was detected. 14528a905236SJesse Barnes */ 14538a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 14548a905236SJesse Barnes { 14551f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 14561f83fee0SDaniel Vetter work); 14571f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 14581f83fee0SDaniel Vetter gpu_error); 14598a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1460f69061beSDaniel Vetter struct intel_ring_buffer *ring; 1461f316a42cSBen Gamari char *error_event[] = { "ERROR=1", NULL }; 1462f316a42cSBen Gamari char *reset_event[] = { "RESET=1", NULL }; 1463f316a42cSBen Gamari char *reset_done_event[] = { "ERROR=0", NULL }; 1464f69061beSDaniel Vetter int i, ret; 14658a905236SJesse Barnes 1466f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 14678a905236SJesse Barnes 14687db0ba24SDaniel Vetter /* 14697db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 14707db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 14717db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 14727db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 14737db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 14747db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 14757db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 14767db0ba24SDaniel Vetter * work we don't need to worry about any other races. 14777db0ba24SDaniel Vetter */ 14787db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 147944d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 14807db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 14817db0ba24SDaniel Vetter reset_event); 14821f83fee0SDaniel Vetter 1483f69061beSDaniel Vetter ret = i915_reset(dev); 1484f69061beSDaniel Vetter 1485f69061beSDaniel Vetter if (ret == 0) { 1486f69061beSDaniel Vetter /* 1487f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1488f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1489f69061beSDaniel Vetter * complete. 1490f69061beSDaniel Vetter * 1491f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1492f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1493f69061beSDaniel Vetter * updates before 1494f69061beSDaniel Vetter * the counter increment. 
1495f69061beSDaniel Vetter */ 1496f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1497f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1498f69061beSDaniel Vetter 1499f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1500f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 15011f83fee0SDaniel Vetter } else { 15021f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1503f316a42cSBen Gamari } 15041f83fee0SDaniel Vetter 1505f69061beSDaniel Vetter for_each_ring(ring, dev_priv, i) 1506f69061beSDaniel Vetter wake_up_all(&ring->irq_queue); 1507f69061beSDaniel Vetter 150896a02917SVille Syrjälä intel_display_handle_reset(dev); 150996a02917SVille Syrjälä 15101f83fee0SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 1511f316a42cSBen Gamari } 15128a905236SJesse Barnes } 15138a905236SJesse Barnes 151485f9e50dSDaniel Vetter /* NB: please notice the memset */ 151585f9e50dSDaniel Vetter static void i915_get_extra_instdone(struct drm_device *dev, 151685f9e50dSDaniel Vetter uint32_t *instdone) 151785f9e50dSDaniel Vetter { 151885f9e50dSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 151985f9e50dSDaniel Vetter memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 152085f9e50dSDaniel Vetter 152185f9e50dSDaniel Vetter switch(INTEL_INFO(dev)->gen) { 152285f9e50dSDaniel Vetter case 2: 152385f9e50dSDaniel Vetter case 3: 152485f9e50dSDaniel Vetter instdone[0] = I915_READ(INSTDONE); 152585f9e50dSDaniel Vetter break; 152685f9e50dSDaniel Vetter case 4: 152785f9e50dSDaniel Vetter case 5: 152885f9e50dSDaniel Vetter case 6: 152985f9e50dSDaniel Vetter instdone[0] = I915_READ(INSTDONE_I965); 153085f9e50dSDaniel Vetter instdone[1] = I915_READ(INSTDONE1); 153185f9e50dSDaniel Vetter break; 153285f9e50dSDaniel Vetter default: 153385f9e50dSDaniel Vetter WARN_ONCE(1, "Unsupported platform\n"); 153485f9e50dSDaniel Vetter case 7: 153585f9e50dSDaniel Vetter instdone[0] = I915_READ(GEN7_INSTDONE_1); 153685f9e50dSDaniel Vetter instdone[1] = I915_READ(GEN7_SC_INSTDONE); 153785f9e50dSDaniel Vetter instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 153885f9e50dSDaniel Vetter instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 153985f9e50dSDaniel Vetter break; 154085f9e50dSDaniel Vetter } 154185f9e50dSDaniel Vetter } 154285f9e50dSDaniel Vetter 15433bd3c932SChris Wilson #ifdef CONFIG_DEBUG_FS 15449df30794SChris Wilson static struct drm_i915_error_object * 1545d0d045e8SBen Widawsky i915_error_object_create_sized(struct drm_i915_private *dev_priv, 1546d0d045e8SBen Widawsky struct drm_i915_gem_object *src, 1547d0d045e8SBen Widawsky const int num_pages) 15489df30794SChris Wilson { 15499df30794SChris Wilson struct drm_i915_error_object *dst; 1550d0d045e8SBen Widawsky int i; 1551e56660ddSChris Wilson u32 reloc_offset; 15529df30794SChris Wilson 155305394f39SChris Wilson if (src == NULL || src->pages == NULL) 15549df30794SChris Wilson return NULL; 15559df30794SChris Wilson 1556d0d045e8SBen Widawsky dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); 15579df30794SChris Wilson if (dst == NULL) 15589df30794SChris Wilson return NULL; 15599df30794SChris Wilson 1560f343c5f6SBen Widawsky reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); 1561d0d045e8SBen Widawsky for (i = 0; i < num_pages; i++) { 1562788885aeSAndrew Morton unsigned long flags; 1563e56660ddSChris Wilson void *d; 1564788885aeSAndrew Morton 1565e56660ddSChris Wilson d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 15669df30794SChris Wilson if (d == NULL) 15679df30794SChris Wilson 
goto unwind; 1568e56660ddSChris Wilson 1569788885aeSAndrew Morton local_irq_save(flags); 15705d4545aeSBen Widawsky if (reloc_offset < dev_priv->gtt.mappable_end && 157174898d7eSDaniel Vetter src->has_global_gtt_mapping) { 1572172975aaSChris Wilson void __iomem *s; 1573172975aaSChris Wilson 1574172975aaSChris Wilson /* Simply ignore tiling or any overlapping fence. 1575172975aaSChris Wilson * It's part of the error state, and this hopefully 1576172975aaSChris Wilson * captures what the GPU read. 1577172975aaSChris Wilson */ 1578172975aaSChris Wilson 15795d4545aeSBen Widawsky s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 15803e4d3af5SPeter Zijlstra reloc_offset); 1581e56660ddSChris Wilson memcpy_fromio(d, s, PAGE_SIZE); 15823e4d3af5SPeter Zijlstra io_mapping_unmap_atomic(s); 1583960e3564SChris Wilson } else if (src->stolen) { 1584960e3564SChris Wilson unsigned long offset; 1585960e3564SChris Wilson 1586960e3564SChris Wilson offset = dev_priv->mm.stolen_base; 1587960e3564SChris Wilson offset += src->stolen->start; 1588960e3564SChris Wilson offset += i << PAGE_SHIFT; 1589960e3564SChris Wilson 15901a240d4dSDaniel Vetter memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); 1591172975aaSChris Wilson } else { 15929da3da66SChris Wilson struct page *page; 1593172975aaSChris Wilson void *s; 1594172975aaSChris Wilson 15959da3da66SChris Wilson page = i915_gem_object_get_page(src, i); 1596172975aaSChris Wilson 15979da3da66SChris Wilson drm_clflush_pages(&page, 1); 15989da3da66SChris Wilson 15999da3da66SChris Wilson s = kmap_atomic(page); 1600172975aaSChris Wilson memcpy(d, s, PAGE_SIZE); 1601172975aaSChris Wilson kunmap_atomic(s); 1602172975aaSChris Wilson 16039da3da66SChris Wilson drm_clflush_pages(&page, 1); 1604172975aaSChris Wilson } 1605788885aeSAndrew Morton local_irq_restore(flags); 1606e56660ddSChris Wilson 16079da3da66SChris Wilson dst->pages[i] = d; 1608e56660ddSChris Wilson 1609e56660ddSChris Wilson reloc_offset += PAGE_SIZE; 16109df30794SChris Wilson } 1611d0d045e8SBen Widawsky dst->page_count = num_pages; 16129df30794SChris Wilson 16139df30794SChris Wilson return dst; 16149df30794SChris Wilson 16159df30794SChris Wilson unwind: 16169da3da66SChris Wilson while (i--) 16179da3da66SChris Wilson kfree(dst->pages[i]); 16189df30794SChris Wilson kfree(dst); 16199df30794SChris Wilson return NULL; 16209df30794SChris Wilson } 1621d0d045e8SBen Widawsky #define i915_error_object_create(dev_priv, src) \ 1622d0d045e8SBen Widawsky i915_error_object_create_sized((dev_priv), (src), \ 1623d0d045e8SBen Widawsky (src)->base.size>>PAGE_SHIFT) 16249df30794SChris Wilson 16259df30794SChris Wilson static void 16269df30794SChris Wilson i915_error_object_free(struct drm_i915_error_object *obj) 16279df30794SChris Wilson { 16289df30794SChris Wilson int page; 16299df30794SChris Wilson 16309df30794SChris Wilson if (obj == NULL) 16319df30794SChris Wilson return; 16329df30794SChris Wilson 16339df30794SChris Wilson for (page = 0; page < obj->page_count; page++) 16349df30794SChris Wilson kfree(obj->pages[page]); 16359df30794SChris Wilson 16369df30794SChris Wilson kfree(obj); 16379df30794SChris Wilson } 16389df30794SChris Wilson 1639742cbee8SDaniel Vetter void 1640742cbee8SDaniel Vetter i915_error_state_free(struct kref *error_ref) 16419df30794SChris Wilson { 1642742cbee8SDaniel Vetter struct drm_i915_error_state *error = container_of(error_ref, 1643742cbee8SDaniel Vetter typeof(*error), ref); 1644e2f973d5SChris Wilson int i; 1645e2f973d5SChris Wilson 164652d39a21SChris Wilson for (i = 0; i < ARRAY_SIZE(error->ring); 
i++) { 164752d39a21SChris Wilson i915_error_object_free(error->ring[i].batchbuffer); 164852d39a21SChris Wilson i915_error_object_free(error->ring[i].ringbuffer); 16497ed73da0SBen Widawsky i915_error_object_free(error->ring[i].ctx); 165052d39a21SChris Wilson kfree(error->ring[i].requests); 165152d39a21SChris Wilson } 1652e2f973d5SChris Wilson 16539df30794SChris Wilson kfree(error->active_bo); 16546ef3d427SChris Wilson kfree(error->overlay); 16557ed73da0SBen Widawsky kfree(error->display); 16569df30794SChris Wilson kfree(error); 16579df30794SChris Wilson } 16581b50247aSChris Wilson static void capture_bo(struct drm_i915_error_buffer *err, 16591b50247aSChris Wilson struct drm_i915_gem_object *obj) 1660c724e8a9SChris Wilson { 1661c724e8a9SChris Wilson err->size = obj->base.size; 1662c724e8a9SChris Wilson err->name = obj->base.name; 16630201f1ecSChris Wilson err->rseqno = obj->last_read_seqno; 16640201f1ecSChris Wilson err->wseqno = obj->last_write_seqno; 1665f343c5f6SBen Widawsky err->gtt_offset = i915_gem_obj_ggtt_offset(obj); 1666c724e8a9SChris Wilson err->read_domains = obj->base.read_domains; 1667c724e8a9SChris Wilson err->write_domain = obj->base.write_domain; 1668c724e8a9SChris Wilson err->fence_reg = obj->fence_reg; 1669c724e8a9SChris Wilson err->pinned = 0; 1670c724e8a9SChris Wilson if (obj->pin_count > 0) 1671c724e8a9SChris Wilson err->pinned = 1; 1672c724e8a9SChris Wilson if (obj->user_pin_count > 0) 1673c724e8a9SChris Wilson err->pinned = -1; 1674c724e8a9SChris Wilson err->tiling = obj->tiling_mode; 1675c724e8a9SChris Wilson err->dirty = obj->dirty; 1676c724e8a9SChris Wilson err->purgeable = obj->madv != I915_MADV_WILLNEED; 167796154f2fSDaniel Vetter err->ring = obj->ring ? obj->ring->id : -1; 167893dfb40cSChris Wilson err->cache_level = obj->cache_level; 16791b50247aSChris Wilson } 1680c724e8a9SChris Wilson 16811b50247aSChris Wilson static u32 capture_active_bo(struct drm_i915_error_buffer *err, 16821b50247aSChris Wilson int count, struct list_head *head) 16831b50247aSChris Wilson { 16841b50247aSChris Wilson struct drm_i915_gem_object *obj; 16851b50247aSChris Wilson int i = 0; 16861b50247aSChris Wilson 16871b50247aSChris Wilson list_for_each_entry(obj, head, mm_list) { 16881b50247aSChris Wilson capture_bo(err++, obj); 1689c724e8a9SChris Wilson if (++i == count) 1690c724e8a9SChris Wilson break; 16911b50247aSChris Wilson } 1692c724e8a9SChris Wilson 16931b50247aSChris Wilson return i; 16941b50247aSChris Wilson } 16951b50247aSChris Wilson 16961b50247aSChris Wilson static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 16971b50247aSChris Wilson int count, struct list_head *head) 16981b50247aSChris Wilson { 16991b50247aSChris Wilson struct drm_i915_gem_object *obj; 17001b50247aSChris Wilson int i = 0; 17011b50247aSChris Wilson 170235c20a60SBen Widawsky list_for_each_entry(obj, head, global_list) { 17031b50247aSChris Wilson if (obj->pin_count == 0) 17041b50247aSChris Wilson continue; 17051b50247aSChris Wilson 17061b50247aSChris Wilson capture_bo(err++, obj); 17071b50247aSChris Wilson if (++i == count) 17081b50247aSChris Wilson break; 1709c724e8a9SChris Wilson } 1710c724e8a9SChris Wilson 1711c724e8a9SChris Wilson return i; 1712c724e8a9SChris Wilson } 1713c724e8a9SChris Wilson 1714748ebc60SChris Wilson static void i915_gem_record_fences(struct drm_device *dev, 1715748ebc60SChris Wilson struct drm_i915_error_state *error) 1716748ebc60SChris Wilson { 1717748ebc60SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 1718748ebc60SChris Wilson int i; 1719748ebc60SChris 
Wilson 1720748ebc60SChris Wilson /* Fences */ 1721748ebc60SChris Wilson switch (INTEL_INFO(dev)->gen) { 1722775d17b6SDaniel Vetter case 7: 1723748ebc60SChris Wilson case 6: 172442b5aeabSVille Syrjälä for (i = 0; i < dev_priv->num_fence_regs; i++) 1725748ebc60SChris Wilson error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1726748ebc60SChris Wilson break; 1727748ebc60SChris Wilson case 5: 1728748ebc60SChris Wilson case 4: 1729748ebc60SChris Wilson for (i = 0; i < 16; i++) 1730748ebc60SChris Wilson error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1731748ebc60SChris Wilson break; 1732748ebc60SChris Wilson case 3: 1733748ebc60SChris Wilson if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1734748ebc60SChris Wilson for (i = 0; i < 8; i++) 1735748ebc60SChris Wilson error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1736748ebc60SChris Wilson case 2: 1737748ebc60SChris Wilson for (i = 0; i < 8; i++) 1738748ebc60SChris Wilson error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1739748ebc60SChris Wilson break; 1740748ebc60SChris Wilson 17417dbf9d6eSBen Widawsky default: 17427dbf9d6eSBen Widawsky BUG(); 1743748ebc60SChris Wilson } 1744748ebc60SChris Wilson } 1745748ebc60SChris Wilson 1746bcfb2e28SChris Wilson static struct drm_i915_error_object * 1747bcfb2e28SChris Wilson i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1748bcfb2e28SChris Wilson struct intel_ring_buffer *ring) 1749bcfb2e28SChris Wilson { 1750bcfb2e28SChris Wilson struct drm_i915_gem_object *obj; 1751bcfb2e28SChris Wilson u32 seqno; 1752bcfb2e28SChris Wilson 1753bcfb2e28SChris Wilson if (!ring->get_seqno) 1754bcfb2e28SChris Wilson return NULL; 1755bcfb2e28SChris Wilson 1756b45305fcSDaniel Vetter if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1757b45305fcSDaniel Vetter u32 acthd = I915_READ(ACTHD); 1758b45305fcSDaniel Vetter 1759b45305fcSDaniel Vetter if (WARN_ON(ring->id != RCS)) 1760b45305fcSDaniel Vetter return NULL; 1761b45305fcSDaniel Vetter 1762b45305fcSDaniel Vetter obj = ring->private; 1763f343c5f6SBen Widawsky if (acthd >= i915_gem_obj_ggtt_offset(obj) && 1764f343c5f6SBen Widawsky acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 1765b45305fcSDaniel Vetter return i915_error_object_create(dev_priv, obj); 1766b45305fcSDaniel Vetter } 1767b45305fcSDaniel Vetter 1768b2eadbc8SChris Wilson seqno = ring->get_seqno(ring, false); 1769bcfb2e28SChris Wilson list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1770bcfb2e28SChris Wilson if (obj->ring != ring) 1771bcfb2e28SChris Wilson continue; 1772bcfb2e28SChris Wilson 17730201f1ecSChris Wilson if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1774bcfb2e28SChris Wilson continue; 1775bcfb2e28SChris Wilson 1776bcfb2e28SChris Wilson if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1777bcfb2e28SChris Wilson continue; 1778bcfb2e28SChris Wilson 1779bcfb2e28SChris Wilson /* We need to copy these to an anonymous buffer as the simplest 1780bcfb2e28SChris Wilson * method to avoid being overwritten by userspace. 
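* The copy is made by i915_error_object_create() with GFP_ATOMIC
* allocations, since error capture may run from interrupt context and
* must not sleep.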
1781bcfb2e28SChris Wilson */ 1782bcfb2e28SChris Wilson return i915_error_object_create(dev_priv, obj); 1783bcfb2e28SChris Wilson } 1784bcfb2e28SChris Wilson 1785bcfb2e28SChris Wilson return NULL; 1786bcfb2e28SChris Wilson } 1787bcfb2e28SChris Wilson 1788d27b1e0eSDaniel Vetter static void i915_record_ring_state(struct drm_device *dev, 1789d27b1e0eSDaniel Vetter struct drm_i915_error_state *error, 1790d27b1e0eSDaniel Vetter struct intel_ring_buffer *ring) 1791d27b1e0eSDaniel Vetter { 1792d27b1e0eSDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 1793d27b1e0eSDaniel Vetter 179433f3f518SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 179512f55818SChris Wilson error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 179633f3f518SDaniel Vetter error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 17977e3b8737SDaniel Vetter error->semaphore_mboxes[ring->id][0] 17987e3b8737SDaniel Vetter = I915_READ(RING_SYNC_0(ring->mmio_base)); 17997e3b8737SDaniel Vetter error->semaphore_mboxes[ring->id][1] 18007e3b8737SDaniel Vetter = I915_READ(RING_SYNC_1(ring->mmio_base)); 1801df2b23d9SChris Wilson error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1802df2b23d9SChris Wilson error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 180333f3f518SDaniel Vetter } 1804c1cd90edSDaniel Vetter 1805d27b1e0eSDaniel Vetter if (INTEL_INFO(dev)->gen >= 4) { 18069d2f41faSDaniel Vetter error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1807d27b1e0eSDaniel Vetter error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1808d27b1e0eSDaniel Vetter error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1809d27b1e0eSDaniel Vetter error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1810c1cd90edSDaniel Vetter error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1811050ee91fSBen Widawsky if (ring->id == RCS) 1812d27b1e0eSDaniel Vetter error->bbaddr = I915_READ64(BB_ADDR); 1813d27b1e0eSDaniel Vetter } else { 18149d2f41faSDaniel Vetter error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1815d27b1e0eSDaniel Vetter error->ipeir[ring->id] = I915_READ(IPEIR); 1816d27b1e0eSDaniel Vetter error->ipehr[ring->id] = I915_READ(IPEHR); 1817d27b1e0eSDaniel Vetter error->instdone[ring->id] = I915_READ(INSTDONE); 1818d27b1e0eSDaniel Vetter } 1819d27b1e0eSDaniel Vetter 18209574b3feSBen Widawsky error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1821c1cd90edSDaniel Vetter error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1822b2eadbc8SChris Wilson error->seqno[ring->id] = ring->get_seqno(ring, false); 1823d27b1e0eSDaniel Vetter error->acthd[ring->id] = intel_ring_get_active_head(ring); 1824c1cd90edSDaniel Vetter error->head[ring->id] = I915_READ_HEAD(ring); 1825c1cd90edSDaniel Vetter error->tail[ring->id] = I915_READ_TAIL(ring); 18260f3b6849SChris Wilson error->ctl[ring->id] = I915_READ_CTL(ring); 18277e3b8737SDaniel Vetter 18287e3b8737SDaniel Vetter error->cpu_ring_head[ring->id] = ring->head; 18297e3b8737SDaniel Vetter error->cpu_ring_tail[ring->id] = ring->tail; 1830d27b1e0eSDaniel Vetter } 1831d27b1e0eSDaniel Vetter 18328c123e54SBen Widawsky 18338c123e54SBen Widawsky static void i915_gem_record_active_context(struct intel_ring_buffer *ring, 18348c123e54SBen Widawsky struct drm_i915_error_state *error, 18358c123e54SBen Widawsky struct drm_i915_error_ring *ering) 18368c123e54SBen Widawsky { 18378c123e54SBen Widawsky struct drm_i915_private *dev_priv = ring->dev->dev_private; 18388c123e54SBen Widawsky struct 
drm_i915_gem_object *obj; 18398c123e54SBen Widawsky 18408c123e54SBen Widawsky /* Currently render ring is the only HW context user */ 18418c123e54SBen Widawsky if (ring->id != RCS || !error->ccid) 18428c123e54SBen Widawsky return; 18438c123e54SBen Widawsky 184435c20a60SBen Widawsky list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 1845f343c5f6SBen Widawsky if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { 18468c123e54SBen Widawsky ering->ctx = i915_error_object_create_sized(dev_priv, 18478c123e54SBen Widawsky obj, 1); 18483ef8fb5aSDamien Lespiau break; 18498c123e54SBen Widawsky } 18508c123e54SBen Widawsky } 18518c123e54SBen Widawsky } 18528c123e54SBen Widawsky 185352d39a21SChris Wilson static void i915_gem_record_rings(struct drm_device *dev, 185452d39a21SChris Wilson struct drm_i915_error_state *error) 185552d39a21SChris Wilson { 185652d39a21SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 1857b4519513SChris Wilson struct intel_ring_buffer *ring; 185852d39a21SChris Wilson struct drm_i915_gem_request *request; 185952d39a21SChris Wilson int i, count; 186052d39a21SChris Wilson 1861b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 186252d39a21SChris Wilson i915_record_ring_state(dev, error, ring); 186352d39a21SChris Wilson 186452d39a21SChris Wilson error->ring[i].batchbuffer = 186552d39a21SChris Wilson i915_error_first_batchbuffer(dev_priv, ring); 186652d39a21SChris Wilson 186752d39a21SChris Wilson error->ring[i].ringbuffer = 186852d39a21SChris Wilson i915_error_object_create(dev_priv, ring->obj); 186952d39a21SChris Wilson 18708c123e54SBen Widawsky 18718c123e54SBen Widawsky i915_gem_record_active_context(ring, error, &error->ring[i]); 18728c123e54SBen Widawsky 187352d39a21SChris Wilson count = 0; 187452d39a21SChris Wilson list_for_each_entry(request, &ring->request_list, list) 187552d39a21SChris Wilson count++; 187652d39a21SChris Wilson 187752d39a21SChris Wilson error->ring[i].num_requests = count; 187852d39a21SChris Wilson error->ring[i].requests = 187952d39a21SChris Wilson kmalloc(count*sizeof(struct drm_i915_error_request), 188052d39a21SChris Wilson GFP_ATOMIC); 188152d39a21SChris Wilson if (error->ring[i].requests == NULL) { 188252d39a21SChris Wilson error->ring[i].num_requests = 0; 188352d39a21SChris Wilson continue; 188452d39a21SChris Wilson } 188552d39a21SChris Wilson 188652d39a21SChris Wilson count = 0; 188752d39a21SChris Wilson list_for_each_entry(request, &ring->request_list, list) { 188852d39a21SChris Wilson struct drm_i915_error_request *erq; 188952d39a21SChris Wilson 189052d39a21SChris Wilson erq = &error->ring[i].requests[count++]; 189152d39a21SChris Wilson erq->seqno = request->seqno; 189252d39a21SChris Wilson erq->jiffies = request->emitted_jiffies; 1893ee4f42b1SChris Wilson erq->tail = request->tail; 189452d39a21SChris Wilson } 189552d39a21SChris Wilson } 189652d39a21SChris Wilson } 189752d39a21SChris Wilson 189826b7c224SBen Widawsky static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, 189926b7c224SBen Widawsky struct drm_i915_error_state *error) 190026b7c224SBen Widawsky { 190126b7c224SBen Widawsky struct drm_i915_gem_object *obj; 190226b7c224SBen Widawsky int i; 190326b7c224SBen Widawsky 190426b7c224SBen Widawsky i = 0; 190526b7c224SBen Widawsky list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 190626b7c224SBen Widawsky i++; 190726b7c224SBen Widawsky error->active_bo_count = i; 190826b7c224SBen Widawsky list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) 190926b7c224SBen 
Widawsky if (obj->pin_count) 191026b7c224SBen Widawsky i++; 191126b7c224SBen Widawsky error->pinned_bo_count = i - error->active_bo_count; 191226b7c224SBen Widawsky 191326b7c224SBen Widawsky if (i) { 191426b7c224SBen Widawsky error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 191526b7c224SBen Widawsky GFP_ATOMIC); 191626b7c224SBen Widawsky if (error->active_bo) 191726b7c224SBen Widawsky error->pinned_bo = 191826b7c224SBen Widawsky error->active_bo + error->active_bo_count; 191926b7c224SBen Widawsky } 192026b7c224SBen Widawsky 192126b7c224SBen Widawsky if (error->active_bo) 192226b7c224SBen Widawsky error->active_bo_count = 192326b7c224SBen Widawsky capture_active_bo(error->active_bo, 192426b7c224SBen Widawsky error->active_bo_count, 192526b7c224SBen Widawsky &dev_priv->mm.active_list); 192626b7c224SBen Widawsky 192726b7c224SBen Widawsky if (error->pinned_bo) 192826b7c224SBen Widawsky error->pinned_bo_count = 192926b7c224SBen Widawsky capture_pinned_bo(error->pinned_bo, 193026b7c224SBen Widawsky error->pinned_bo_count, 193126b7c224SBen Widawsky &dev_priv->mm.bound_list); 193226b7c224SBen Widawsky } 193326b7c224SBen Widawsky 19348a905236SJesse Barnes /** 19358a905236SJesse Barnes * i915_capture_error_state - capture an error record for later analysis 19368a905236SJesse Barnes * @dev: drm device 19378a905236SJesse Barnes * 19388a905236SJesse Barnes * Should be called when an error is detected (either a hang or an error 19398a905236SJesse Barnes * interrupt) to capture error state from the time of the error. Fills 19408a905236SJesse Barnes * out a structure which becomes available in debugfs for user level tools 19418a905236SJesse Barnes * to pick up. 19428a905236SJesse Barnes */ 194363eeaf38SJesse Barnes static void i915_capture_error_state(struct drm_device *dev) 194463eeaf38SJesse Barnes { 194563eeaf38SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 194663eeaf38SJesse Barnes struct drm_i915_error_state *error; 194763eeaf38SJesse Barnes unsigned long flags; 194826b7c224SBen Widawsky int pipe; 194963eeaf38SJesse Barnes 195099584db3SDaniel Vetter spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 195199584db3SDaniel Vetter error = dev_priv->gpu_error.first_error; 195299584db3SDaniel Vetter spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 19539df30794SChris Wilson if (error) 19549df30794SChris Wilson return; 195563eeaf38SJesse Barnes 19569db4a9c7SJesse Barnes /* Account for pipe specific data like PIPE*STAT */ 195733f3f518SDaniel Vetter error = kzalloc(sizeof(*error), GFP_ATOMIC); 195863eeaf38SJesse Barnes if (!error) { 19599df30794SChris Wilson DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 19609df30794SChris Wilson return; 196163eeaf38SJesse Barnes } 196263eeaf38SJesse Barnes 19632f86f191SBen Widawsky DRM_INFO("capturing error event; look for more information in " 1964ef86ddceSMika Kuoppala "/sys/class/drm/card%d/error\n", dev->primary->index); 19652fa772f3SChris Wilson 1966742cbee8SDaniel Vetter kref_init(&error->ref); 196763eeaf38SJesse Barnes error->eir = I915_READ(EIR); 196863eeaf38SJesse Barnes error->pgtbl_er = I915_READ(PGTBL_ER); 1969211816ecSBen Widawsky if (HAS_HW_CONTEXTS(dev)) 1970b9a3906bSBen Widawsky error->ccid = I915_READ(CCID); 1971be998e2eSBen Widawsky 1972be998e2eSBen Widawsky if (HAS_PCH_SPLIT(dev)) 1973be998e2eSBen Widawsky error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1974be998e2eSBen Widawsky else if (IS_VALLEYVIEW(dev)) 1975be998e2eSBen Widawsky error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1976be998e2eSBen 
Widawsky else if (IS_GEN2(dev)) 1977be998e2eSBen Widawsky error->ier = I915_READ16(IER); 1978be998e2eSBen Widawsky else 1979be998e2eSBen Widawsky error->ier = I915_READ(IER); 1980be998e2eSBen Widawsky 19810f3b6849SChris Wilson if (INTEL_INFO(dev)->gen >= 6) 19820f3b6849SChris Wilson error->derrmr = I915_READ(DERRMR); 19830f3b6849SChris Wilson 19840f3b6849SChris Wilson if (IS_VALLEYVIEW(dev)) 19850f3b6849SChris Wilson error->forcewake = I915_READ(FORCEWAKE_VLV); 19860f3b6849SChris Wilson else if (INTEL_INFO(dev)->gen >= 7) 19870f3b6849SChris Wilson error->forcewake = I915_READ(FORCEWAKE_MT); 19880f3b6849SChris Wilson else if (INTEL_INFO(dev)->gen == 6) 19890f3b6849SChris Wilson error->forcewake = I915_READ(FORCEWAKE); 19900f3b6849SChris Wilson 19914f3308b9SPaulo Zanoni if (!HAS_PCH_SPLIT(dev)) 19929db4a9c7SJesse Barnes for_each_pipe(pipe) 19939db4a9c7SJesse Barnes error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1994d27b1e0eSDaniel Vetter 199533f3f518SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 1996f406839fSChris Wilson error->error = I915_READ(ERROR_GEN6); 199733f3f518SDaniel Vetter error->done_reg = I915_READ(DONE_REG); 199833f3f518SDaniel Vetter } 1999add354ddSChris Wilson 200071e172e8SBen Widawsky if (INTEL_INFO(dev)->gen == 7) 200171e172e8SBen Widawsky error->err_int = I915_READ(GEN7_ERR_INT); 200271e172e8SBen Widawsky 2003050ee91fSBen Widawsky i915_get_extra_instdone(dev, error->extra_instdone); 2004050ee91fSBen Widawsky 200526b7c224SBen Widawsky i915_gem_capture_buffers(dev_priv, error); 2006748ebc60SChris Wilson i915_gem_record_fences(dev, error); 200752d39a21SChris Wilson i915_gem_record_rings(dev, error); 20089df30794SChris Wilson 20098a905236SJesse Barnes do_gettimeofday(&error->time); 20108a905236SJesse Barnes 20116ef3d427SChris Wilson error->overlay = intel_overlay_capture_error_state(dev); 2012c4a1d9e4SChris Wilson error->display = intel_display_capture_error_state(dev); 20136ef3d427SChris Wilson 201499584db3SDaniel Vetter spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 201599584db3SDaniel Vetter if (dev_priv->gpu_error.first_error == NULL) { 201699584db3SDaniel Vetter dev_priv->gpu_error.first_error = error; 20179df30794SChris Wilson error = NULL; 20189df30794SChris Wilson } 201999584db3SDaniel Vetter spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 20209df30794SChris Wilson 20219df30794SChris Wilson if (error) 2022742cbee8SDaniel Vetter i915_error_state_free(&error->ref); 20239df30794SChris Wilson } 20249df30794SChris Wilson 20259df30794SChris Wilson void i915_destroy_error_state(struct drm_device *dev) 20269df30794SChris Wilson { 20279df30794SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 20289df30794SChris Wilson struct drm_i915_error_state *error; 20296dc0e816SBen Widawsky unsigned long flags; 20309df30794SChris Wilson 203199584db3SDaniel Vetter spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 203299584db3SDaniel Vetter error = dev_priv->gpu_error.first_error; 203399584db3SDaniel Vetter dev_priv->gpu_error.first_error = NULL; 203499584db3SDaniel Vetter spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 20359df30794SChris Wilson 20369df30794SChris Wilson if (error) 2037742cbee8SDaniel Vetter kref_put(&error->ref, i915_error_state_free); 203863eeaf38SJesse Barnes } 20393bd3c932SChris Wilson #else 20403bd3c932SChris Wilson #define i915_capture_error_state(x) 20413bd3c932SChris Wilson #endif 204263eeaf38SJesse Barnes 204335aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 2044c0e09200SDave 
Airlie { 20458a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 2046bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 204763eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 2048050ee91fSBen Widawsky int pipe, i; 204963eeaf38SJesse Barnes 205035aed2e6SChris Wilson if (!eir) 205135aed2e6SChris Wilson return; 205263eeaf38SJesse Barnes 2053a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 20548a905236SJesse Barnes 2055bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 2056bd9854f9SBen Widawsky 20578a905236SJesse Barnes if (IS_G4X(dev)) { 20588a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 20598a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 20608a905236SJesse Barnes 2061a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2062a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2063050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 2064050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2065a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2066a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 20678a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 20683143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 20698a905236SJesse Barnes } 20708a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 20718a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 2072a70491ccSJoe Perches pr_err("page table error\n"); 2073a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 20748a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 20753143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 20768a905236SJesse Barnes } 20778a905236SJesse Barnes } 20788a905236SJesse Barnes 2079a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 208063eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 208163eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 2082a70491ccSJoe Perches pr_err("page table error\n"); 2083a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 208463eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 20853143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 208663eeaf38SJesse Barnes } 20878a905236SJesse Barnes } 20888a905236SJesse Barnes 208963eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 2090a70491ccSJoe Perches pr_err("memory refresh error:\n"); 20919db4a9c7SJesse Barnes for_each_pipe(pipe) 2092a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 20939db4a9c7SJesse Barnes pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 209463eeaf38SJesse Barnes /* pipestat has already been acked */ 209563eeaf38SJesse Barnes } 209663eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 2097a70491ccSJoe Perches pr_err("instruction error\n"); 2098a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2099050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 2100050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2101a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 210263eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 210363eeaf38SJesse Barnes 2104a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2105a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2106a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 210763eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 21083143a2bfSChris Wilson POSTING_READ(IPEIR); 210963eeaf38SJesse Barnes } else { 211063eeaf38SJesse Barnes u32 ipeir = 
I915_READ(IPEIR_I965); 211163eeaf38SJesse Barnes 2112a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2113a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2114a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2115a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 211663eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 21173143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 211863eeaf38SJesse Barnes } 211963eeaf38SJesse Barnes } 212063eeaf38SJesse Barnes 212163eeaf38SJesse Barnes I915_WRITE(EIR, eir); 21223143a2bfSChris Wilson POSTING_READ(EIR); 212363eeaf38SJesse Barnes eir = I915_READ(EIR); 212463eeaf38SJesse Barnes if (eir) { 212563eeaf38SJesse Barnes /* 212663eeaf38SJesse Barnes * some errors might have become stuck, 212763eeaf38SJesse Barnes * mask them. 212863eeaf38SJesse Barnes */ 212963eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 213063eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 213163eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 213263eeaf38SJesse Barnes } 213335aed2e6SChris Wilson } 213435aed2e6SChris Wilson 213535aed2e6SChris Wilson /** 213635aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 213735aed2e6SChris Wilson * @dev: drm device 213835aed2e6SChris Wilson * 213935aed2e6SChris Wilson * Do some basic checking of register state at error interrupt time and 214035aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 214135aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 214235aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 214335aed2e6SChris Wilson * of a ring dump etc.). 214435aed2e6SChris Wilson */ 2145527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 214635aed2e6SChris Wilson { 214735aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 2148b4519513SChris Wilson struct intel_ring_buffer *ring; 2149b4519513SChris Wilson int i; 215035aed2e6SChris Wilson 215135aed2e6SChris Wilson i915_capture_error_state(dev); 215235aed2e6SChris Wilson i915_report_and_clear_eir(dev); 21538a905236SJesse Barnes 2154ba1234d1SBen Gamari if (wedged) { 2155f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2156f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 2157ba1234d1SBen Gamari 215811ed50ecSBen Gamari /* 21591f83fee0SDaniel Vetter * Wakeup waiting processes so that the reset work item 21601f83fee0SDaniel Vetter * doesn't deadlock trying to grab various locks.
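* Once woken, waiters see the reset-in-progress flag set above and back
* off instead of blocking again.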
216111ed50ecSBen Gamari */ 2162b4519513SChris Wilson for_each_ring(ring, dev_priv, i) 2163b4519513SChris Wilson wake_up_all(&ring->irq_queue); 216411ed50ecSBen Gamari } 216511ed50ecSBen Gamari 216699584db3SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 21678a905236SJesse Barnes } 21688a905236SJesse Barnes 216921ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 21704e5359cdSSimon Farnsworth { 21714e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 21724e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 21734e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 217405394f39SChris Wilson struct drm_i915_gem_object *obj; 21754e5359cdSSimon Farnsworth struct intel_unpin_work *work; 21764e5359cdSSimon Farnsworth unsigned long flags; 21774e5359cdSSimon Farnsworth bool stall_detected; 21784e5359cdSSimon Farnsworth 21794e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 21804e5359cdSSimon Farnsworth if (intel_crtc == NULL) 21814e5359cdSSimon Farnsworth return; 21824e5359cdSSimon Farnsworth 21834e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 21844e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 21854e5359cdSSimon Farnsworth 2186e7d841caSChris Wilson if (work == NULL || 2187e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2188e7d841caSChris Wilson !work->enable_stall_check) { 21894e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 21904e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 21914e5359cdSSimon Farnsworth return; 21924e5359cdSSimon Farnsworth } 21934e5359cdSSimon Farnsworth 21944e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 219505394f39SChris Wilson obj = work->pending_flip_obj; 2196a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 21979db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 2198446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2199f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 22004e5359cdSSimon Farnsworth } else { 22019db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 2202f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 220301f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 22044e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 22054e5359cdSSimon Farnsworth } 22064e5359cdSSimon Farnsworth 22074e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 22084e5359cdSSimon Farnsworth 22094e5359cdSSimon Farnsworth if (stall_detected) { 22104e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 22114e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 22124e5359cdSSimon Farnsworth } 22134e5359cdSSimon Farnsworth } 22144e5359cdSSimon Farnsworth 221542f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 221642f52ef8SKeith Packard * we use as a pipe index 221742f52ef8SKeith Packard */ 2218f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 22190a3e67a4SJesse Barnes { 22200a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2221e9d21d7fSKeith Packard unsigned long irqflags; 222271e0ffa5SJesse Barnes 22235eddb70bSChris Wilson if (!i915_pipe_enabled(dev, 
pipe)) 222471e0ffa5SJesse Barnes return -EINVAL; 22250a3e67a4SJesse Barnes 22261ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2227f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 22287c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 22297c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 22300a3e67a4SJesse Barnes else 22317c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 22327c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 22338692d00eSChris Wilson 22348692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 22358692d00eSChris Wilson if (dev_priv->info->gen == 3) 22366b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 22371ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22388692d00eSChris Wilson 22390a3e67a4SJesse Barnes return 0; 22400a3e67a4SJesse Barnes } 22410a3e67a4SJesse Barnes 2242f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2243f796cf8fSJesse Barnes { 2244f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2245f796cf8fSJesse Barnes unsigned long irqflags; 2246f796cf8fSJesse Barnes 2247f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 2248f796cf8fSJesse Barnes return -EINVAL; 2249f796cf8fSJesse Barnes 2250f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2251f796cf8fSJesse Barnes ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 2252f796cf8fSJesse Barnes DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 2253f796cf8fSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2254f796cf8fSJesse Barnes 2255f796cf8fSJesse Barnes return 0; 2256f796cf8fSJesse Barnes } 2257f796cf8fSJesse Barnes 2258f71d4af4SJesse Barnes static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 2259b1f14ad0SJesse Barnes { 2260b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2261b1f14ad0SJesse Barnes unsigned long irqflags; 2262b1f14ad0SJesse Barnes 2263b1f14ad0SJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 2264b1f14ad0SJesse Barnes return -EINVAL; 2265b1f14ad0SJesse Barnes 2266b1f14ad0SJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2267b615b57aSChris Wilson ironlake_enable_display_irq(dev_priv, 2268b615b57aSChris Wilson DE_PIPEA_VBLANK_IVB << (5 * pipe)); 2269b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2270b1f14ad0SJesse Barnes 2271b1f14ad0SJesse Barnes return 0; 2272b1f14ad0SJesse Barnes } 2273b1f14ad0SJesse Barnes 22747e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 22757e231dbeSJesse Barnes { 22767e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22777e231dbeSJesse Barnes unsigned long irqflags; 227831acc7f5SJesse Barnes u32 imr; 22797e231dbeSJesse Barnes 22807e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 22817e231dbeSJesse Barnes return -EINVAL; 22827e231dbeSJesse Barnes 22837e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 22847e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 228531acc7f5SJesse Barnes if (pipe == 0) 22867e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 228731acc7f5SJesse Barnes else 22887e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22897e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 229031acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 229131acc7f5SJesse Barnes 
PIPE_START_VBLANK_INTERRUPT_ENABLE); 22927e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 22937e231dbeSJesse Barnes 22947e231dbeSJesse Barnes return 0; 22957e231dbeSJesse Barnes } 22967e231dbeSJesse Barnes 229742f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 229842f52ef8SKeith Packard * we use as a pipe index 229942f52ef8SKeith Packard */ 2300f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 23010a3e67a4SJesse Barnes { 23020a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2303e9d21d7fSKeith Packard unsigned long irqflags; 23040a3e67a4SJesse Barnes 23051ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 23068692d00eSChris Wilson if (dev_priv->info->gen == 3) 23076b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 23088692d00eSChris Wilson 23097c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 23107c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 23117c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 23121ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 23130a3e67a4SJesse Barnes } 23140a3e67a4SJesse Barnes 2315f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2316f796cf8fSJesse Barnes { 2317f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2318f796cf8fSJesse Barnes unsigned long irqflags; 2319f796cf8fSJesse Barnes 2320f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2321f796cf8fSJesse Barnes ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 2322f796cf8fSJesse Barnes DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 2323f796cf8fSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2324f796cf8fSJesse Barnes } 2325f796cf8fSJesse Barnes 2326f71d4af4SJesse Barnes static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 2327b1f14ad0SJesse Barnes { 2328b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2329b1f14ad0SJesse Barnes unsigned long irqflags; 2330b1f14ad0SJesse Barnes 2331b1f14ad0SJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2332b615b57aSChris Wilson ironlake_disable_display_irq(dev_priv, 2333b615b57aSChris Wilson DE_PIPEA_VBLANK_IVB << (pipe * 5)); 2334b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2335b1f14ad0SJesse Barnes } 2336b1f14ad0SJesse Barnes 23377e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 23387e231dbeSJesse Barnes { 23397e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 23407e231dbeSJesse Barnes unsigned long irqflags; 234131acc7f5SJesse Barnes u32 imr; 23427e231dbeSJesse Barnes 23437e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 234431acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 234531acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 23467e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 234731acc7f5SJesse Barnes if (pipe == 0) 23487e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 234931acc7f5SJesse Barnes else 23507e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 23517e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 23527e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 23537e231dbeSJesse Barnes } 23547e231dbeSJesse Barnes 2355893eead0SChris 
Wilson static u32 2356893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 2357852835f3SZou Nan hai { 2358893eead0SChris Wilson return list_entry(ring->request_list.prev, 2359893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 2360893eead0SChris Wilson } 2361893eead0SChris Wilson 23629107e9d2SChris Wilson static bool 23639107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2364893eead0SChris Wilson { 23659107e9d2SChris Wilson return (list_empty(&ring->request_list) || 23669107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 2367f65d9421SBen Gamari } 2368f65d9421SBen Gamari 23696274f212SChris Wilson static struct intel_ring_buffer * 23706274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2371a24a11e6SChris Wilson { 2372a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 23736274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 2374a24a11e6SChris Wilson 2375a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2376a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 2377a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 23786274f212SChris Wilson return NULL; 2379a24a11e6SChris Wilson 2380a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 2381a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 2382a24a11e6SChris Wilson */ 23836274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2384a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 2385a24a11e6SChris Wilson do { 2386a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 2387a24a11e6SChris Wilson if (cmd == ipehr) 2388a24a11e6SChris Wilson break; 2389a24a11e6SChris Wilson 2390a24a11e6SChris Wilson acthd -= 4; 2391a24a11e6SChris Wilson if (acthd < acthd_min) 23926274f212SChris Wilson return NULL; 2393a24a11e6SChris Wilson } while (1); 2394a24a11e6SChris Wilson 23956274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 23966274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2397a24a11e6SChris Wilson } 2398a24a11e6SChris Wilson 23996274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 24006274f212SChris Wilson { 24016274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 24026274f212SChris Wilson struct intel_ring_buffer *signaller; 24036274f212SChris Wilson u32 seqno, ctl; 24046274f212SChris Wilson 24056274f212SChris Wilson ring->hangcheck.deadlock = true; 24066274f212SChris Wilson 24076274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 24086274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 24096274f212SChris Wilson return -1; 24106274f212SChris Wilson 24116274f212SChris Wilson /* cursory check for an unkickable deadlock */ 24126274f212SChris Wilson ctl = I915_READ_CTL(signaller); 24136274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 24146274f212SChris Wilson return -1; 24156274f212SChris Wilson 24166274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 24176274f212SChris Wilson } 24186274f212SChris Wilson 24196274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 24206274f212SChris Wilson { 24216274f212SChris Wilson struct intel_ring_buffer *ring; 24226274f212SChris Wilson int i; 
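/*
 * Reset the per-ring deadlock markers before each hangcheck pass;
 * semaphore_passed() sets ring->hangcheck.deadlock while it walks the
 * signalling chain, so a stale marker left over from a previous pass
 * would be misread as a semaphore cycle.
 */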
24236274f212SChris Wilson 24246274f212SChris Wilson for_each_ring(ring, dev_priv, i) 24256274f212SChris Wilson ring->hangcheck.deadlock = false; 24266274f212SChris Wilson } 24276274f212SChris Wilson 2428ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 2429ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 24301ec14ad3SChris Wilson { 24311ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 24321ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 24339107e9d2SChris Wilson u32 tmp; 24349107e9d2SChris Wilson 24356274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 24366274f212SChris Wilson return active; 24376274f212SChris Wilson 24389107e9d2SChris Wilson if (IS_GEN2(dev)) 24396274f212SChris Wilson return hung; 24409107e9d2SChris Wilson 24419107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 24429107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 24439107e9d2SChris Wilson * and break the hang. This should work on 24449107e9d2SChris Wilson * all but the second generation chipsets. 24459107e9d2SChris Wilson */ 24469107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 24471ec14ad3SChris Wilson if (tmp & RING_WAIT) { 24481ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 24491ec14ad3SChris Wilson ring->name); 24501ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 24516274f212SChris Wilson return kick; 24521ec14ad3SChris Wilson } 2453a24a11e6SChris Wilson 24546274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 24556274f212SChris Wilson switch (semaphore_passed(ring)) { 24566274f212SChris Wilson default: 24576274f212SChris Wilson return hung; 24586274f212SChris Wilson case 1: 2459a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 2460a24a11e6SChris Wilson ring->name); 2461a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 24626274f212SChris Wilson return kick; 24636274f212SChris Wilson case 0: 24646274f212SChris Wilson return wait; 24656274f212SChris Wilson } 24669107e9d2SChris Wilson } 24679107e9d2SChris Wilson 24686274f212SChris Wilson return hung; 2469a24a11e6SChris Wilson } 2470d1e61e7fSChris Wilson 2471f65d9421SBen Gamari /** 2472f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 247305407ff8SMika Kuoppala * batchbuffers in a long time. We keep track of per-ring seqno progress and, 247405407ff8SMika Kuoppala * if there is no progress, the hangcheck score for that ring is increased. 247505407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. In the stuck case 247605407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 247705407ff8SMika Kuoppala * we assume the chip is wedged and try to fix it by resetting the chip.
2478f65d9421SBen Gamari */ 2479f65d9421SBen Gamari void i915_hangcheck_elapsed(unsigned long data) 2480f65d9421SBen Gamari { 2481f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 2482f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 2483b4519513SChris Wilson struct intel_ring_buffer *ring; 2484b4519513SChris Wilson int i; 248505407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 24869107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 24879107e9d2SChris Wilson #define BUSY 1 24889107e9d2SChris Wilson #define KICK 5 24899107e9d2SChris Wilson #define HUNG 20 24909107e9d2SChris Wilson #define FIRE 30 2491893eead0SChris Wilson 24923e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 24933e0dc6b0SBen Widawsky return; 24943e0dc6b0SBen Widawsky 2495b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 249605407ff8SMika Kuoppala u32 seqno, acthd; 24979107e9d2SChris Wilson bool busy = true; 2498b4519513SChris Wilson 24996274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 25006274f212SChris Wilson 250105407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 250205407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 250305407ff8SMika Kuoppala 250405407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 25059107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 25069107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 25079107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. */ 25089107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 25099107e9d2SChris Wilson ring->name); 25109107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 25119107e9d2SChris Wilson ring->hangcheck.score += HUNG; 25129107e9d2SChris Wilson } else 25139107e9d2SChris Wilson busy = false; 251405407ff8SMika Kuoppala } else { 25159107e9d2SChris Wilson int score; 25169107e9d2SChris Wilson 25176274f212SChris Wilson /* We always increment the hangcheck score 25186274f212SChris Wilson * if the ring is busy and still processing 25196274f212SChris Wilson * the same request, so that no single request 25206274f212SChris Wilson * can run indefinitely (such as a chain of 25216274f212SChris Wilson * batches). The only time we do not increment 25226274f212SChris Wilson * the hangcheck score on this ring is when this 25236274f212SChris Wilson * ring is in a legitimate wait for another 25246274f212SChris Wilson * ring. In that case the waiting ring is a 25256274f212SChris Wilson * victim and we want to be sure we catch the 25266274f212SChris Wilson * right culprit. Then every time we do kick 25276274f212SChris Wilson * the ring, add a small increment to the 25286274f212SChris Wilson * score so that we can catch a batch that is 25296274f212SChris Wilson * being repeatedly kicked and so responsible 25306274f212SChris Wilson * for stalling the machine.
25319107e9d2SChris Wilson */ 2532ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 2533ad8beaeaSMika Kuoppala acthd); 2534ad8beaeaSMika Kuoppala 2535ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 25366274f212SChris Wilson case wait: 25376274f212SChris Wilson score = 0; 25386274f212SChris Wilson break; 25396274f212SChris Wilson case active: 25409107e9d2SChris Wilson score = BUSY; 25416274f212SChris Wilson break; 25426274f212SChris Wilson case kick: 25436274f212SChris Wilson score = KICK; 25446274f212SChris Wilson break; 25456274f212SChris Wilson case hung: 25466274f212SChris Wilson score = HUNG; 25476274f212SChris Wilson stuck[i] = true; 25486274f212SChris Wilson break; 25496274f212SChris Wilson } 25509107e9d2SChris Wilson ring->hangcheck.score += score; 255105407ff8SMika Kuoppala } 25529107e9d2SChris Wilson } else { 25539107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 25549107e9d2SChris Wilson * attempts across multiple batches. 25559107e9d2SChris Wilson */ 25569107e9d2SChris Wilson if (ring->hangcheck.score > 0) 25579107e9d2SChris Wilson ring->hangcheck.score--; 2558cbb465e7SChris Wilson } 2559f65d9421SBen Gamari 256005407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 256105407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 25629107e9d2SChris Wilson busy_count += busy; 256305407ff8SMika Kuoppala } 256405407ff8SMika Kuoppala 256505407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 25669107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 2567acd78c11SBen Widawsky DRM_ERROR("%s on %s\n", 256805407ff8SMika Kuoppala stuck[i] ? "stuck" : "no progress", 2569a43adf07SChris Wilson ring->name); 2570a43adf07SChris Wilson rings_hung++; 257105407ff8SMika Kuoppala } 257205407ff8SMika Kuoppala } 257305407ff8SMika Kuoppala 257405407ff8SMika Kuoppala if (rings_hung) 257505407ff8SMika Kuoppala return i915_handle_error(dev, true); 257605407ff8SMika Kuoppala 257705407ff8SMika Kuoppala if (busy_count) 257805407ff8SMika Kuoppala /* Reset timer in case chip hangs without another request 257905407ff8SMika Kuoppala * being added */ 258099584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 258105407ff8SMika Kuoppala round_jiffies_up(jiffies + 258205407ff8SMika Kuoppala DRM_I915_HANGCHECK_JIFFIES)); 2583f65d9421SBen Gamari } 2584f65d9421SBen Gamari 258591738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 258691738a95SPaulo Zanoni { 258791738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 258891738a95SPaulo Zanoni 258991738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 259091738a95SPaulo Zanoni return; 259191738a95SPaulo Zanoni 259291738a95SPaulo Zanoni /* south display irq */ 259391738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 259491738a95SPaulo Zanoni /* 259591738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 259691738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 259791738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 259891738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR.
259991738a95SPaulo Zanoni */ 260091738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 260191738a95SPaulo Zanoni POSTING_READ(SDEIER); 260291738a95SPaulo Zanoni } 260391738a95SPaulo Zanoni 2604c0e09200SDave Airlie /* drm_dma.h hooks 2605c0e09200SDave Airlie */ 2606f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2607036a4a7dSZhenyu Wang { 2608036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2609036a4a7dSZhenyu Wang 26104697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 26114697995bSJesse Barnes 2612036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2613bdfcdb63SDaniel Vetter 2614036a4a7dSZhenyu Wang /* XXX hotplug from PCH */ 2615036a4a7dSZhenyu Wang 2616036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2617036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 26183143a2bfSChris Wilson POSTING_READ(DEIER); 2619036a4a7dSZhenyu Wang 2620036a4a7dSZhenyu Wang /* and GT */ 2621036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2622036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 26233143a2bfSChris Wilson POSTING_READ(GTIER); 2624c650156aSZhenyu Wang 262591738a95SPaulo Zanoni ibx_irq_preinstall(dev); 26267d99163dSBen Widawsky } 26277d99163dSBen Widawsky 26287d99163dSBen Widawsky static void ivybridge_irq_preinstall(struct drm_device *dev) 26297d99163dSBen Widawsky { 26307d99163dSBen Widawsky drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26317d99163dSBen Widawsky 26327d99163dSBen Widawsky atomic_set(&dev_priv->irq_received, 0); 26337d99163dSBen Widawsky 26347d99163dSBen Widawsky I915_WRITE(HWSTAM, 0xeffe); 26357d99163dSBen Widawsky 26367d99163dSBen Widawsky /* XXX hotplug from PCH */ 26377d99163dSBen Widawsky 26387d99163dSBen Widawsky I915_WRITE(DEIMR, 0xffffffff); 26397d99163dSBen Widawsky I915_WRITE(DEIER, 0x0); 26407d99163dSBen Widawsky POSTING_READ(DEIER); 26417d99163dSBen Widawsky 26427d99163dSBen Widawsky /* and GT */ 26437d99163dSBen Widawsky I915_WRITE(GTIMR, 0xffffffff); 26447d99163dSBen Widawsky I915_WRITE(GTIER, 0x0); 26457d99163dSBen Widawsky POSTING_READ(GTIER); 26467d99163dSBen Widawsky 2647eda63ffbSBen Widawsky /* Power management */ 2648eda63ffbSBen Widawsky I915_WRITE(GEN6_PMIMR, 0xffffffff); 2649eda63ffbSBen Widawsky I915_WRITE(GEN6_PMIER, 0x0); 2650eda63ffbSBen Widawsky POSTING_READ(GEN6_PMIER); 2651eda63ffbSBen Widawsky 265291738a95SPaulo Zanoni ibx_irq_preinstall(dev); 2653036a4a7dSZhenyu Wang } 2654036a4a7dSZhenyu Wang 26557e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 26567e231dbeSJesse Barnes { 26577e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 26587e231dbeSJesse Barnes int pipe; 26597e231dbeSJesse Barnes 26607e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 26617e231dbeSJesse Barnes 26627e231dbeSJesse Barnes /* VLV magic */ 26637e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 26647e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 26657e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 26667e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 26677e231dbeSJesse Barnes 26687e231dbeSJesse Barnes /* and GT */ 26697e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 26707e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 26717e231dbeSJesse Barnes I915_WRITE(GTIMR, 0xffffffff); 26727e231dbeSJesse Barnes I915_WRITE(GTIER, 0x0); 26737e231dbeSJesse Barnes POSTING_READ(GTIER); 26747e231dbeSJesse Barnes 26757e231dbeSJesse Barnes 
I915_WRITE(DPINVGTT, 0xff); 26767e231dbeSJesse Barnes 26777e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 26787e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 26797e231dbeSJesse Barnes for_each_pipe(pipe) 26807e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 26817e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 26827e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 26837e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 26847e231dbeSJesse Barnes POSTING_READ(VLV_IER); 26857e231dbeSJesse Barnes } 26867e231dbeSJesse Barnes 268782a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 268882a28bcfSDaniel Vetter { 268982a28bcfSDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 269082a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 269182a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2692fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 269382a28bcfSDaniel Vetter 269482a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2695fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 269682a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2697cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2698fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 269982a28bcfSDaniel Vetter } else { 2700fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 270182a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2702cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2703fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 270482a28bcfSDaniel Vetter } 270582a28bcfSDaniel Vetter 2706fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 270782a28bcfSDaniel Vetter 27087fe0b973SKeith Packard /* 27097fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 27107fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 27117fe0b973SKeith Packard * 27127fe0b973SKeith Packard * This register is the same on all known PCH chips. 
27137fe0b973SKeith Packard */ 27147fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 27157fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 27167fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 27177fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 27187fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 27197fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 27207fe0b973SKeith Packard } 27217fe0b973SKeith Packard 2722d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2723d46da437SPaulo Zanoni { 2724d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 272582a28bcfSDaniel Vetter u32 mask; 2726d46da437SPaulo Zanoni 2727692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2728692a04cfSDaniel Vetter return; 2729692a04cfSDaniel Vetter 27308664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 27318664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2732de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 27338664281bSPaulo Zanoni } else { 27348664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 27358664281bSPaulo Zanoni 27368664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 27378664281bSPaulo Zanoni } 2738ab5c608bSBen Widawsky 2739d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2740d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2741d46da437SPaulo Zanoni } 2742d46da437SPaulo Zanoni 2743f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2744036a4a7dSZhenyu Wang { 27454bc9d430SDaniel Vetter unsigned long irqflags; 27464bc9d430SDaniel Vetter 2747036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2748036a4a7dSZhenyu Wang /* enable the kinds of interrupts that are always enabled */ 2749013d5aa2SJesse Barnes u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2750ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 27518664281bSPaulo Zanoni DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2752de032bf4SPaulo Zanoni DE_PIPEA_FIFO_UNDERRUN | DE_POISON; 2753cc609d5dSBen Widawsky u32 gt_irqs; 2754036a4a7dSZhenyu Wang 27551ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2756036a4a7dSZhenyu Wang 2757036a4a7dSZhenyu Wang /* should always be able to generate an irq */ 2758036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 27591ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 27606005ce42SDaniel Vetter I915_WRITE(DEIER, display_mask | 27616005ce42SDaniel Vetter DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); 27623143a2bfSChris Wilson POSTING_READ(DEIER); 2763036a4a7dSZhenyu Wang 27641ec14ad3SChris Wilson dev_priv->gt_irq_mask = ~0; 2765036a4a7dSZhenyu Wang 2766036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 27671ec14ad3SChris Wilson I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2768881f47b6SXiang, Haihao 2769cc609d5dSBen Widawsky gt_irqs = GT_RENDER_USER_INTERRUPT; 2770cc609d5dSBen Widawsky 27711ec14ad3SChris Wilson if (IS_GEN6(dev)) 2772cc609d5dSBen Widawsky gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 27731ec14ad3SChris Wilson else 2774cc609d5dSBen Widawsky gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2775cc609d5dSBen Widawsky ILK_BSD_USER_INTERRUPT; 2776cc609d5dSBen Widawsky 2777cc609d5dSBen Widawsky I915_WRITE(GTIER, gt_irqs); 27783143a2bfSChris Wilson
POSTING_READ(GTIER); 2779036a4a7dSZhenyu Wang 2780d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 27817fe0b973SKeith Packard 2782f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 27836005ce42SDaniel Vetter /* Enable PCU event interrupts 27846005ce42SDaniel Vetter * 27856005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 27864bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 27874bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 27884bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2789f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 27904bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2791f97108d1SJesse Barnes } 2792f97108d1SJesse Barnes 2793036a4a7dSZhenyu Wang return 0; 2794036a4a7dSZhenyu Wang } 2795036a4a7dSZhenyu Wang 2796f71d4af4SJesse Barnes static int ivybridge_irq_postinstall(struct drm_device *dev) 2797b1f14ad0SJesse Barnes { 2798b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2799b1f14ad0SJesse Barnes /* enable the kinds of interrupts that are always enabled */ 2800b615b57aSChris Wilson u32 display_mask = 2801b615b57aSChris Wilson DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2802b615b57aSChris Wilson DE_PLANEC_FLIP_DONE_IVB | 2803b615b57aSChris Wilson DE_PLANEB_FLIP_DONE_IVB | 2804ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE_IVB | 28058664281bSPaulo Zanoni DE_AUX_CHANNEL_A_IVB | 28068664281bSPaulo Zanoni DE_ERR_INT_IVB; 280712638c57SBen Widawsky u32 pm_irqs = GEN6_PM_RPS_EVENTS; 2808cc609d5dSBen Widawsky u32 gt_irqs; 2809b1f14ad0SJesse Barnes 2810b1f14ad0SJesse Barnes dev_priv->irq_mask = ~display_mask; 2811b1f14ad0SJesse Barnes 2812b1f14ad0SJesse Barnes /* should always be able to generate an irq */ 28138664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2814b1f14ad0SJesse Barnes I915_WRITE(DEIIR, I915_READ(DEIIR)); 2815b1f14ad0SJesse Barnes I915_WRITE(DEIMR, dev_priv->irq_mask); 2816b615b57aSChris Wilson I915_WRITE(DEIER, 2817b615b57aSChris Wilson display_mask | 2818b615b57aSChris Wilson DE_PIPEC_VBLANK_IVB | 2819b615b57aSChris Wilson DE_PIPEB_VBLANK_IVB | 2820b615b57aSChris Wilson DE_PIPEA_VBLANK_IVB); 2821b1f14ad0SJesse Barnes POSTING_READ(DEIER); 2822b1f14ad0SJesse Barnes 2823cc609d5dSBen Widawsky dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2824b1f14ad0SJesse Barnes 2825b1f14ad0SJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2826b1f14ad0SJesse Barnes I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2827b1f14ad0SJesse Barnes 2828cc609d5dSBen Widawsky gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | 2829cc609d5dSBen Widawsky GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2830cc609d5dSBen Widawsky I915_WRITE(GTIER, gt_irqs); 2831b1f14ad0SJesse Barnes POSTING_READ(GTIER); 2832b1f14ad0SJesse Barnes 283312638c57SBen Widawsky I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 283412638c57SBen Widawsky if (HAS_VEBOX(dev)) 283512638c57SBen Widawsky pm_irqs |= PM_VEBOX_USER_INTERRUPT | 283612638c57SBen Widawsky PM_VEBOX_CS_ERROR_INTERRUPT; 283712638c57SBen Widawsky 283812638c57SBen Widawsky /* Our enable/disable rps functions may touch these registers so 283912638c57SBen Widawsky * make sure to set a known state for only the non-RPS bits. 284012638c57SBen Widawsky * The RMW is extra paranoia since this should be called after being set 284112638c57SBen Widawsky * to a known state in preinstall.
284212638c57SBen Widawsky * */ 284312638c57SBen Widawsky I915_WRITE(GEN6_PMIMR, 284412638c57SBen Widawsky (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); 284512638c57SBen Widawsky I915_WRITE(GEN6_PMIER, 284612638c57SBen Widawsky (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); 284712638c57SBen Widawsky POSTING_READ(GEN6_PMIER); 2848eda63ffbSBen Widawsky 2849d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 28507fe0b973SKeith Packard 2851b1f14ad0SJesse Barnes return 0; 2852b1f14ad0SJesse Barnes } 2853b1f14ad0SJesse Barnes 28547e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 28557e231dbeSJesse Barnes { 28567e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2857cc609d5dSBen Widawsky u32 gt_irqs; 28587e231dbeSJesse Barnes u32 enable_mask; 285931acc7f5SJesse Barnes u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2860b79480baSDaniel Vetter unsigned long irqflags; 28617e231dbeSJesse Barnes 28627e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 286331acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 286431acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 286531acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 28667e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 28677e231dbeSJesse Barnes 286831acc7f5SJesse Barnes /* 286931acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 287031acc7f5SJesse Barnes * toggle them based on usage. 287131acc7f5SJesse Barnes */ 287231acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 287331acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 287431acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 28757e231dbeSJesse Barnes 287620afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 287720afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 287820afbda2SDaniel Vetter 28797e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 28807e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 28817e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28827e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 28837e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 28847e231dbeSJesse Barnes POSTING_READ(VLV_IER); 28857e231dbeSJesse Barnes 2886b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2887b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 2888b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 288931acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2890515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 289131acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2892b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 289331acc7f5SJesse Barnes 28947e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28957e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 28967e231dbeSJesse Barnes 289731acc7f5SJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 289831acc7f5SJesse Barnes I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 28993bcedbe5SJesse Barnes 2900cc609d5dSBen Widawsky gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | 2901cc609d5dSBen Widawsky GT_BLT_USER_INTERRUPT; 2902cc609d5dSBen Widawsky I915_WRITE(GTIER, gt_irqs); 29037e231dbeSJesse Barnes POSTING_READ(GTIER); 29047e231dbeSJesse Barnes 29057e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 29067e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 29077e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 29087e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 29097e231dbeSJesse Barnes #endif 29107e231dbeSJesse Barnes 29117e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 291220afbda2SDaniel Vetter 291320afbda2SDaniel Vetter return 0; 291420afbda2SDaniel Vetter } 291520afbda2SDaniel Vetter 29167e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 29177e231dbeSJesse Barnes { 29187e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 29197e231dbeSJesse Barnes int pipe; 29207e231dbeSJesse Barnes 29217e231dbeSJesse Barnes if (!dev_priv) 29227e231dbeSJesse Barnes return; 29237e231dbeSJesse Barnes 2924ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2925ac4c16c5SEgbert Eich 29267e231dbeSJesse Barnes for_each_pipe(pipe) 29277e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 29287e231dbeSJesse Barnes 29297e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 29307e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 29317e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 29327e231dbeSJesse Barnes for_each_pipe(pipe) 29337e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 29347e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 29357e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 29367e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 29377e231dbeSJesse Barnes POSTING_READ(VLV_IER); 29387e231dbeSJesse Barnes } 29397e231dbeSJesse Barnes 2940f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2941036a4a7dSZhenyu Wang { 2942036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 29434697995bSJesse Barnes 29444697995bSJesse Barnes if (!dev_priv) 29454697995bSJesse Barnes return; 29464697995bSJesse Barnes 2947ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2948ac4c16c5SEgbert Eich 2949036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2950036a4a7dSZhenyu Wang 2951036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2952036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2953036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 29548664281bSPaulo Zanoni if (IS_GEN7(dev)) 29558664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, 
I915_READ(GEN7_ERR_INT)); 2956036a4a7dSZhenyu Wang 2957036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2958036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2959036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2960192aac1fSKeith Packard 2961ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2962ab5c608bSBen Widawsky return; 2963ab5c608bSBen Widawsky 2964192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2965192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2966192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 29678664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 29688664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2969036a4a7dSZhenyu Wang } 2970036a4a7dSZhenyu Wang 2971c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2972c2798b19SChris Wilson { 2973c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2974c2798b19SChris Wilson int pipe; 2975c2798b19SChris Wilson 2976c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2977c2798b19SChris Wilson 2978c2798b19SChris Wilson for_each_pipe(pipe) 2979c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2980c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2981c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2982c2798b19SChris Wilson POSTING_READ16(IER); 2983c2798b19SChris Wilson } 2984c2798b19SChris Wilson 2985c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 2986c2798b19SChris Wilson { 2987c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2988c2798b19SChris Wilson 2989c2798b19SChris Wilson I915_WRITE16(EMR, 2990c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2991c2798b19SChris Wilson 2992c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2993c2798b19SChris Wilson dev_priv->irq_mask = 2994c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2995c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2996c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2997c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2998c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2999c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 3000c2798b19SChris Wilson 3001c2798b19SChris Wilson I915_WRITE16(IER, 3002c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3003c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3004c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3005c2798b19SChris Wilson I915_USER_INTERRUPT); 3006c2798b19SChris Wilson POSTING_READ16(IER); 3007c2798b19SChris Wilson 3008c2798b19SChris Wilson return 0; 3009c2798b19SChris Wilson } 3010c2798b19SChris Wilson 301190a72f87SVille Syrjälä /* 301290a72f87SVille Syrjälä * Returns true when a page flip has completed. 
301390a72f87SVille Syrjälä */ 301490a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 301590a72f87SVille Syrjälä int pipe, u16 iir) 301690a72f87SVille Syrjälä { 301790a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 301890a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 301990a72f87SVille Syrjälä 302090a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 302190a72f87SVille Syrjälä return false; 302290a72f87SVille Syrjälä 302390a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 302490a72f87SVille Syrjälä return false; 302590a72f87SVille Syrjälä 302690a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 302790a72f87SVille Syrjälä 302890a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 302990a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 303090a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 303190a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 303290a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 303390a72f87SVille Syrjälä */ 303490a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 303590a72f87SVille Syrjälä return false; 303690a72f87SVille Syrjälä 303790a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 303890a72f87SVille Syrjälä 303990a72f87SVille Syrjälä return true; 304090a72f87SVille Syrjälä } 304190a72f87SVille Syrjälä 3042ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3043c2798b19SChris Wilson { 3044c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3045c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3046c2798b19SChris Wilson u16 iir, new_iir; 3047c2798b19SChris Wilson u32 pipe_stats[2]; 3048c2798b19SChris Wilson unsigned long irqflags; 3049c2798b19SChris Wilson int irq_received; 3050c2798b19SChris Wilson int pipe; 3051c2798b19SChris Wilson u16 flip_mask = 3052c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3053c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3054c2798b19SChris Wilson 3055c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 3056c2798b19SChris Wilson 3057c2798b19SChris Wilson iir = I915_READ16(IIR); 3058c2798b19SChris Wilson if (iir == 0) 3059c2798b19SChris Wilson return IRQ_NONE; 3060c2798b19SChris Wilson 3061c2798b19SChris Wilson while (iir & ~flip_mask) { 3062c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3063c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 3064c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 3065c2798b19SChris Wilson * interrupts (for non-MSI). 
3066c2798b19SChris Wilson */ 3067c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3068c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3069c2798b19SChris Wilson i915_handle_error(dev, false); 3070c2798b19SChris Wilson 3071c2798b19SChris Wilson for_each_pipe(pipe) { 3072c2798b19SChris Wilson int reg = PIPESTAT(pipe); 3073c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3074c2798b19SChris Wilson 3075c2798b19SChris Wilson /* 3076c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 3077c2798b19SChris Wilson */ 3078c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3079c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3080c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3081c2798b19SChris Wilson pipe_name(pipe)); 3082c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 3083c2798b19SChris Wilson irq_received = 1; 3084c2798b19SChris Wilson } 3085c2798b19SChris Wilson } 3086c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3087c2798b19SChris Wilson 3088c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 3089c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 3090c2798b19SChris Wilson 3091d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 3092c2798b19SChris Wilson 3093c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 3094c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3095c2798b19SChris Wilson 3096c2798b19SChris Wilson if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 309790a72f87SVille Syrjälä i8xx_handle_vblank(dev, 0, iir)) 309890a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 3099c2798b19SChris Wilson 3100c2798b19SChris Wilson if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 310190a72f87SVille Syrjälä i8xx_handle_vblank(dev, 1, iir)) 310290a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 3103c2798b19SChris Wilson 3104c2798b19SChris Wilson iir = new_iir; 3105c2798b19SChris Wilson } 3106c2798b19SChris Wilson 3107c2798b19SChris Wilson return IRQ_HANDLED; 3108c2798b19SChris Wilson } 3109c2798b19SChris Wilson 3110c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 3111c2798b19SChris Wilson { 3112c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3113c2798b19SChris Wilson int pipe; 3114c2798b19SChris Wilson 3115c2798b19SChris Wilson for_each_pipe(pipe) { 3116c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 3117c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3118c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3119c2798b19SChris Wilson } 3120c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 3121c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 3122c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 3123c2798b19SChris Wilson } 3124c2798b19SChris Wilson 3125a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 3126a266c7d5SChris Wilson { 3127a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3128a266c7d5SChris Wilson int pipe; 3129a266c7d5SChris Wilson 3130a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3131a266c7d5SChris Wilson 3132a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3133a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3134a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3135a266c7d5SChris Wilson } 
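/*
 * Quiesce the remaining legacy interrupt registers before install:
 * write the HWSTAM mask, clear every PIPESTAT enable, mask all
 * sources in IMR and disable IER, so i915_irq_postinstall() starts
 * from a known state.
 */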
3136a266c7d5SChris Wilson 313700d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xeffe); 3138a266c7d5SChris Wilson for_each_pipe(pipe) 3139a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3140a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3141a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3142a266c7d5SChris Wilson POSTING_READ(IER); 3143a266c7d5SChris Wilson } 3144a266c7d5SChris Wilson 3145a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 3146a266c7d5SChris Wilson { 3147a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 314838bde180SChris Wilson u32 enable_mask; 3149a266c7d5SChris Wilson 315038bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 315138bde180SChris Wilson 315238bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 315338bde180SChris Wilson dev_priv->irq_mask = 315438bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 315538bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 315638bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 315738bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 315838bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 315938bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 316038bde180SChris Wilson 316138bde180SChris Wilson enable_mask = 316238bde180SChris Wilson I915_ASLE_INTERRUPT | 316338bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 316438bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 316538bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 316638bde180SChris Wilson I915_USER_INTERRUPT; 316738bde180SChris Wilson 3168a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 316920afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 317020afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 317120afbda2SDaniel Vetter 3172a266c7d5SChris Wilson /* Enable in IER... */ 3173a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3174a266c7d5SChris Wilson /* and unmask in IMR */ 3175a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3176a266c7d5SChris Wilson } 3177a266c7d5SChris Wilson 3178a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 3179a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 3180a266c7d5SChris Wilson POSTING_READ(IER); 3181a266c7d5SChris Wilson 3182f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 318320afbda2SDaniel Vetter 318420afbda2SDaniel Vetter return 0; 318520afbda2SDaniel Vetter } 318620afbda2SDaniel Vetter 318790a72f87SVille Syrjälä /* 318890a72f87SVille Syrjälä * Returns true when a page flip has completed. 
318990a72f87SVille Syrjälä */ 319090a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 319190a72f87SVille Syrjälä int plane, int pipe, u32 iir) 319290a72f87SVille Syrjälä { 319390a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 319490a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 319590a72f87SVille Syrjälä 319690a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 319790a72f87SVille Syrjälä return false; 319890a72f87SVille Syrjälä 319990a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 320090a72f87SVille Syrjälä return false; 320190a72f87SVille Syrjälä 320290a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 320390a72f87SVille Syrjälä 320490a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 320590a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 320690a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 320790a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 320890a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 320990a72f87SVille Syrjälä */ 321090a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 321190a72f87SVille Syrjälä return false; 321290a72f87SVille Syrjälä 321390a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 321490a72f87SVille Syrjälä 321590a72f87SVille Syrjälä return true; 321690a72f87SVille Syrjälä } 321790a72f87SVille Syrjälä 3218ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 3219a266c7d5SChris Wilson { 3220a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 3221a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 32228291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3223a266c7d5SChris Wilson unsigned long irqflags; 322438bde180SChris Wilson u32 flip_mask = 322538bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 322638bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 322738bde180SChris Wilson int pipe, ret = IRQ_NONE; 3228a266c7d5SChris Wilson 3229a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 3230a266c7d5SChris Wilson 3231a266c7d5SChris Wilson iir = I915_READ(IIR); 323238bde180SChris Wilson do { 323338bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 32348291ee90SChris Wilson bool blc_event = false; 3235a266c7d5SChris Wilson 3236a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 3237a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 3238a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 3239a266c7d5SChris Wilson * interrupts (for non-MSI). 
3240a266c7d5SChris Wilson */ 3241a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3242a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3243a266c7d5SChris Wilson i915_handle_error(dev, false); 3244a266c7d5SChris Wilson 3245a266c7d5SChris Wilson for_each_pipe(pipe) { 3246a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 3247a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 3248a266c7d5SChris Wilson 324938bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 3250a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 3251a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3252a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 3253a266c7d5SChris Wilson pipe_name(pipe)); 3254a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 325538bde180SChris Wilson irq_received = true; 3256a266c7d5SChris Wilson } 3257a266c7d5SChris Wilson } 3258a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3259a266c7d5SChris Wilson 3260a266c7d5SChris Wilson if (!irq_received) 3261a266c7d5SChris Wilson break; 3262a266c7d5SChris Wilson 3263a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 3264a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 3265a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3266a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3267b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3268a266c7d5SChris Wilson 3269a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3270a266c7d5SChris Wilson hotplug_status); 327191d131d2SDaniel Vetter 327210a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 327391d131d2SDaniel Vetter 3274a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 327538bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 3276a266c7d5SChris Wilson } 3277a266c7d5SChris Wilson 327838bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 3279a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 3280a266c7d5SChris Wilson 3281a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 3282a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 3283a266c7d5SChris Wilson 3284a266c7d5SChris Wilson for_each_pipe(pipe) { 328538bde180SChris Wilson int plane = pipe; 328638bde180SChris Wilson if (IS_MOBILE(dev)) 328738bde180SChris Wilson plane = !plane; 32885e2032d4SVille Syrjälä 328990a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 329090a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 329190a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3292a266c7d5SChris Wilson 3293a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3294a266c7d5SChris Wilson blc_event = true; 3295a266c7d5SChris Wilson } 3296a266c7d5SChris Wilson 3297a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3298a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 3299a266c7d5SChris Wilson 3300a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 3301a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 3302a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 3303a266c7d5SChris Wilson * we would never get another interrupt. 
3304a266c7d5SChris Wilson * 3305a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 3306a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 3307a266c7d5SChris Wilson * another one. 3308a266c7d5SChris Wilson * 3309a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 3310a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 3311a266c7d5SChris Wilson * the posting read. This should be rare enough to never 3312a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 3313a266c7d5SChris Wilson * stray interrupts. 3314a266c7d5SChris Wilson */ 331538bde180SChris Wilson ret = IRQ_HANDLED; 3316a266c7d5SChris Wilson iir = new_iir; 331738bde180SChris Wilson } while (iir & ~flip_mask); 3318a266c7d5SChris Wilson 3319d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 33208291ee90SChris Wilson 3321a266c7d5SChris Wilson return ret; 3322a266c7d5SChris Wilson } 3323a266c7d5SChris Wilson 3324a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 3325a266c7d5SChris Wilson { 3326a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3327a266c7d5SChris Wilson int pipe; 3328a266c7d5SChris Wilson 3329ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 3330ac4c16c5SEgbert Eich 3331a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 3332a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3333a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3334a266c7d5SChris Wilson } 3335a266c7d5SChris Wilson 333600d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 333755b39755SChris Wilson for_each_pipe(pipe) { 333855b39755SChris Wilson /* Clear enable bits; then clear status bits */ 3339a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 334055b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 334155b39755SChris Wilson } 3342a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3343a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3344a266c7d5SChris Wilson 3345a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 3346a266c7d5SChris Wilson } 3347a266c7d5SChris Wilson 3348a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 3349a266c7d5SChris Wilson { 3350a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3351a266c7d5SChris Wilson int pipe; 3352a266c7d5SChris Wilson 3353a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 3354a266c7d5SChris Wilson 3355a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 3356a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3357a266c7d5SChris Wilson 3358a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 3359a266c7d5SChris Wilson for_each_pipe(pipe) 3360a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 3361a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 3362a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 3363a266c7d5SChris Wilson POSTING_READ(IER); 3364a266c7d5SChris Wilson } 3365a266c7d5SChris Wilson 3366a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 3367a266c7d5SChris Wilson { 3368a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3369bbba0a97SChris Wilson u32 enable_mask; 3370a266c7d5SChris Wilson u32 error_mask; 3371b79480baSDaniel Vetter unsigned long irqflags; 3372a266c7d5SChris Wilson 3373a266c7d5SChris Wilson /* Unmask the interrupts 
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;
	unsigned long irqflags;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

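/*
 * Program PORT_HOTPLUG_EN from the per-pin hpd_stats state: only pins
 * still marked HPD_ENABLED get their detect bits turned on.  Must be
 * called with dev_priv->irq_lock held (see the assert below).
 */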
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

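/*
 * Main interrupt handler for gen4/g4x.  Loops until IIR (ignoring the
 * flip-pending bits) reads back as zero, handling pipe status, hotplug,
 * GMBUS, render/BSD user interrupts and ASLE events along the way.
 */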
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

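		/*
		 * Unlike the gen3 handler above, no mobile plane swizzle is
		 * applied here: the pipe number doubles as the plane index in
		 * the i915_handle_vblank() call.
		 */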
		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

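/*
 * Tear down gen4/g4x interrupt state: stop the hotplug re-enable timer,
 * disable hotplug detection and every interrupt source, then clear any
 * residual pipe status and IIR bits.
 */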
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

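/*
 * Hook up the per-platform irq/vblank callbacks on the DRM driver and the
 * hpd_irq_setup hook, and set up the work items and timers used by the
 * interrupt code (hotplug, GPU error, RPS, parity, hangcheck).
 */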
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

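/*
 * Reset the per-pin hotplug statistics, restore each connector's polling
 * mode, and let the platform hpd_irq_setup hook program the hotplug
 * detection hardware (under irq_lock to keep the locking asserts happy).
 */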
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}