/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

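/*
 * IVB/HSW have a single DE_ERR_INT_IVB mask bit shared by all pipes, so the
 * error interrupt may only be unmasked once FIFO underrun reporting is
 * enabled on every pipe.
 */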
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

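/*
 * When enabling, clear any stale underrun bit in GEN7_ERR_INT first and only
 * unmask DE_ERR_INT_IVB if no pipe has reporting disabled; when disabling,
 * check whether an underrun was raised while the interrupt was masked.
 */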
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


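/*
 * PIPESTAT keeps the interrupt enable bits in the high word and the
 * corresponding status bits 16 bits below them; status bits are cleared by
 * writing 1, which is why the helpers below mask reads with 0x7fff0000 and
 * write mask | (mask >> 16) when enabling.
 */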
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

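/*
 * Report the current scanout position for @pipe. Gen4+ only exposes a
 * scanline register (PIPEDSL), so *hpos is reported as 0 there; older parts
 * derive both coordinates from the frame pixel counter.
 */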
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

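/*
 * Re-run detect() on a single connector and report whether its status
 * changed. The caller must hold mode_config.mutex.
 */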
%d to %d\n", 577321a1b30SEgbert Eich connector->base.id, 578321a1b30SEgbert Eich drm_get_connector_name(connector), 579321a1b30SEgbert Eich old_status, connector->status); 580321a1b30SEgbert Eich return (old_status != connector->status); 581321a1b30SEgbert Eich } 582321a1b30SEgbert Eich 5835ca58282SJesse Barnes /* 5845ca58282SJesse Barnes * Handle hotplug events outside the interrupt handler proper. 5855ca58282SJesse Barnes */ 586ac4c16c5SEgbert Eich #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 587ac4c16c5SEgbert Eich 5885ca58282SJesse Barnes static void i915_hotplug_work_func(struct work_struct *work) 5895ca58282SJesse Barnes { 5905ca58282SJesse Barnes drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 5915ca58282SJesse Barnes hotplug_work); 5925ca58282SJesse Barnes struct drm_device *dev = dev_priv->dev; 593c31c4ba3SKeith Packard struct drm_mode_config *mode_config = &dev->mode_config; 594cd569aedSEgbert Eich struct intel_connector *intel_connector; 595cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 596cd569aedSEgbert Eich struct drm_connector *connector; 597cd569aedSEgbert Eich unsigned long irqflags; 598cd569aedSEgbert Eich bool hpd_disabled = false; 599321a1b30SEgbert Eich bool changed = false; 600142e2398SEgbert Eich u32 hpd_event_bits; 6015ca58282SJesse Barnes 60252d7ecedSDaniel Vetter /* HPD irq before everything is fully set up. */ 60352d7ecedSDaniel Vetter if (!dev_priv->enable_hotplug_processing) 60452d7ecedSDaniel Vetter return; 60552d7ecedSDaniel Vetter 606a65e34c7SKeith Packard mutex_lock(&mode_config->mutex); 607e67189abSJesse Barnes DRM_DEBUG_KMS("running encoder hotplug functions\n"); 608e67189abSJesse Barnes 609cd569aedSEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 610142e2398SEgbert Eich 611142e2398SEgbert Eich hpd_event_bits = dev_priv->hpd_event_bits; 612142e2398SEgbert Eich dev_priv->hpd_event_bits = 0; 613cd569aedSEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 614cd569aedSEgbert Eich intel_connector = to_intel_connector(connector); 615cd569aedSEgbert Eich intel_encoder = intel_connector->encoder; 616cd569aedSEgbert Eich if (intel_encoder->hpd_pin > HPD_NONE && 617cd569aedSEgbert Eich dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 618cd569aedSEgbert Eich connector->polled == DRM_CONNECTOR_POLL_HPD) { 619cd569aedSEgbert Eich DRM_INFO("HPD interrupt storm detected on connector %s: " 620cd569aedSEgbert Eich "switching from hotplug detection to polling\n", 621cd569aedSEgbert Eich drm_get_connector_name(connector)); 622cd569aedSEgbert Eich dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 623cd569aedSEgbert Eich connector->polled = DRM_CONNECTOR_POLL_CONNECT 624cd569aedSEgbert Eich | DRM_CONNECTOR_POLL_DISCONNECT; 625cd569aedSEgbert Eich hpd_disabled = true; 626cd569aedSEgbert Eich } 627142e2398SEgbert Eich if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 628142e2398SEgbert Eich DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 629142e2398SEgbert Eich drm_get_connector_name(connector), intel_encoder->hpd_pin); 630142e2398SEgbert Eich } 631cd569aedSEgbert Eich } 632cd569aedSEgbert Eich /* if there were no outputs to poll, poll was disabled, 633cd569aedSEgbert Eich * therefore make sure it's enabled when disabling HPD on 634cd569aedSEgbert Eich * some connectors */ 635ac4c16c5SEgbert Eich if (hpd_disabled) { 636cd569aedSEgbert Eich drm_kms_helper_poll_enable(dev); 637ac4c16c5SEgbert Eich 
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

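/* Wake anyone waiting on this ring's seqno and (re)arm the hangcheck timer. */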
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

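/*
 * Mask further L3 parity interrupts and defer the register readout and uevent
 * to ivybridge_parity_work, which needs struct_mutex.
 */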
static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_parity_error_irq_handler(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv,
				 u32 pm_iir)
{
	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

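/*
 * Track hotplug interrupts per pin: a pin that fires more than
 * HPD_STORM_THRESHOLD times within HPD_STORM_DETECT_PERIOD ms is treated as
 * an interrupt storm and marked disabled; hpd_irq_setup() then reprograms the
 * hardware and the hotplug work is queued to switch the connector to polling.
 */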
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq,
		   &dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_rps_irq_handler() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
		notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
		DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
		i915_handle_error(dev_priv->dev, false);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

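/*
 * South (PCH) display interrupt dispatch for IBX: fan out the hotplug, AUX,
 * GMBUS, audio and transcoder FIFO underrun bits reported in pch_iir.
 */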
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

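/*
 * GEN7_ERR_INT handler: report poison and per-pipe FIFO underruns, then write
 * the value back to clear the handled bits.
 */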
DRM_ERROR("PCH poison interrupt\n"); 1101776ad806SJesse Barnes 11029db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 11039db4a9c7SJesse Barnes for_each_pipe(pipe) 11049db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 11059db4a9c7SJesse Barnes pipe_name(pipe), 11069db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 1107776ad806SJesse Barnes 1108776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1109776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1110776ad806SJesse Barnes 1111776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1112776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1113776ad806SJesse Barnes 1114776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 11158664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 11168664281bSPaulo Zanoni false)) 11178664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 11188664281bSPaulo Zanoni 11198664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 11208664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 11218664281bSPaulo Zanoni false)) 11228664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 11238664281bSPaulo Zanoni } 11248664281bSPaulo Zanoni 11258664281bSPaulo Zanoni static void ivb_err_int_handler(struct drm_device *dev) 11268664281bSPaulo Zanoni { 11278664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 11288664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 11298664281bSPaulo Zanoni 1130de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 1131de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1132de032bf4SPaulo Zanoni 11338664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_A) 11348664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 11358664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); 11368664281bSPaulo Zanoni 11378664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_B) 11388664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 11398664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 11408664281bSPaulo Zanoni 11418664281bSPaulo Zanoni if (err_int & ERR_INT_FIFO_UNDERRUN_C) 11428664281bSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) 11438664281bSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); 11448664281bSPaulo Zanoni 11458664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 11468664281bSPaulo Zanoni } 11478664281bSPaulo Zanoni 11488664281bSPaulo Zanoni static void cpt_serr_int_handler(struct drm_device *dev) 11498664281bSPaulo Zanoni { 11508664281bSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 11518664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 11528664281bSPaulo Zanoni 1153de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 1154de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 1155de032bf4SPaulo Zanoni 11568664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 11578664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 11588664281bSPaulo Zanoni false)) 11598664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 11608664281bSPaulo Zanoni 11618664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 11628664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 11638664281bSPaulo Zanoni 
false)) 11648664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 11658664281bSPaulo Zanoni 11668664281bSPaulo Zanoni if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 11678664281bSPaulo Zanoni if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 11688664281bSPaulo Zanoni false)) 11698664281bSPaulo Zanoni DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 11708664281bSPaulo Zanoni 11718664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 1172776ad806SJesse Barnes } 1173776ad806SJesse Barnes 117423e81d69SAdam Jackson static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 117523e81d69SAdam Jackson { 117623e81d69SAdam Jackson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 117723e81d69SAdam Jackson int pipe; 1178b543fb04SEgbert Eich u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 117923e81d69SAdam Jackson 118010a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 118191d131d2SDaniel Vetter 1182cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1183cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 118423e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 1185cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1186cfc33bf7SVille Syrjälä port_name(port)); 1187cfc33bf7SVille Syrjälä } 118823e81d69SAdam Jackson 118923e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 1190ce99c256SDaniel Vetter dp_aux_irq_handler(dev); 119123e81d69SAdam Jackson 119223e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 1193515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 119423e81d69SAdam Jackson 119523e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 119623e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 119723e81d69SAdam Jackson 119823e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 119923e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 120023e81d69SAdam Jackson 120123e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 120223e81d69SAdam Jackson for_each_pipe(pipe) 120323e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 120423e81d69SAdam Jackson pipe_name(pipe), 120523e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 12068664281bSPaulo Zanoni 12078664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 12088664281bSPaulo Zanoni cpt_serr_int_handler(dev); 120923e81d69SAdam Jackson } 121023e81d69SAdam Jackson 1211c008bc6eSPaulo Zanoni static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1212c008bc6eSPaulo Zanoni { 1213c008bc6eSPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 1214c008bc6eSPaulo Zanoni 1215c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 1216c008bc6eSPaulo Zanoni dp_aux_irq_handler(dev); 1217c008bc6eSPaulo Zanoni 1218c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 1219c008bc6eSPaulo Zanoni intel_opregion_asle_intr(dev); 1220c008bc6eSPaulo Zanoni 1221c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_VBLANK) 1222c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 0); 1223c008bc6eSPaulo Zanoni 1224c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_VBLANK) 1225c008bc6eSPaulo Zanoni drm_handle_vblank(dev, 1); 1226c008bc6eSPaulo Zanoni 1227c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 1228c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 1229c008bc6eSPaulo Zanoni 1230c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEA_FIFO_UNDERRUN) 1231c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) 1232c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe A FIFO 
underrun\n"); 1233c008bc6eSPaulo Zanoni 1234c008bc6eSPaulo Zanoni if (de_iir & DE_PIPEB_FIFO_UNDERRUN) 1235c008bc6eSPaulo Zanoni if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) 1236c008bc6eSPaulo Zanoni DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); 1237c008bc6eSPaulo Zanoni 1238c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEA_FLIP_DONE) { 1239c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 0); 1240c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 0); 1241c008bc6eSPaulo Zanoni } 1242c008bc6eSPaulo Zanoni 1243c008bc6eSPaulo Zanoni if (de_iir & DE_PLANEB_FLIP_DONE) { 1244c008bc6eSPaulo Zanoni intel_prepare_page_flip(dev, 1); 1245c008bc6eSPaulo Zanoni intel_finish_page_flip_plane(dev, 1); 1246c008bc6eSPaulo Zanoni } 1247c008bc6eSPaulo Zanoni 1248c008bc6eSPaulo Zanoni /* check event from PCH */ 1249c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 1250c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 1251c008bc6eSPaulo Zanoni 1252c008bc6eSPaulo Zanoni if (HAS_PCH_CPT(dev)) 1253c008bc6eSPaulo Zanoni cpt_irq_handler(dev, pch_iir); 1254c008bc6eSPaulo Zanoni else 1255c008bc6eSPaulo Zanoni ibx_irq_handler(dev, pch_iir); 1256c008bc6eSPaulo Zanoni 1257c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 1258c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 1259c008bc6eSPaulo Zanoni } 1260c008bc6eSPaulo Zanoni 1261c008bc6eSPaulo Zanoni if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1262c008bc6eSPaulo Zanoni ironlake_rps_change_irq_handler(dev); 1263c008bc6eSPaulo Zanoni } 1264c008bc6eSPaulo Zanoni 12659719fb98SPaulo Zanoni static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 12669719fb98SPaulo Zanoni { 12679719fb98SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 12689719fb98SPaulo Zanoni int i; 12699719fb98SPaulo Zanoni 12709719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 12719719fb98SPaulo Zanoni ivb_err_int_handler(dev); 12729719fb98SPaulo Zanoni 12739719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 12749719fb98SPaulo Zanoni dp_aux_irq_handler(dev); 12759719fb98SPaulo Zanoni 12769719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 12779719fb98SPaulo Zanoni intel_opregion_asle_intr(dev); 12789719fb98SPaulo Zanoni 12799719fb98SPaulo Zanoni for (i = 0; i < 3; i++) { 12809719fb98SPaulo Zanoni if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 12819719fb98SPaulo Zanoni drm_handle_vblank(dev, i); 12829719fb98SPaulo Zanoni if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 12839719fb98SPaulo Zanoni intel_prepare_page_flip(dev, i); 12849719fb98SPaulo Zanoni intel_finish_page_flip_plane(dev, i); 12859719fb98SPaulo Zanoni } 12869719fb98SPaulo Zanoni } 12879719fb98SPaulo Zanoni 12889719fb98SPaulo Zanoni /* check event from PCH */ 12899719fb98SPaulo Zanoni if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 12909719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 12919719fb98SPaulo Zanoni 12929719fb98SPaulo Zanoni cpt_irq_handler(dev, pch_iir); 12939719fb98SPaulo Zanoni 12949719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 12959719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 12969719fb98SPaulo Zanoni } 12979719fb98SPaulo Zanoni } 12989719fb98SPaulo Zanoni 1299f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1300b1f14ad0SJesse Barnes { 1301b1f14ad0SJesse Barnes struct drm_device *dev = (struct drm_device *) arg; 1302b1f14ad0SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1303f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, 
sde_ier = 0; 13040e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 1305b1f14ad0SJesse Barnes 1306b1f14ad0SJesse Barnes atomic_inc(&dev_priv->irq_received); 1307b1f14ad0SJesse Barnes 13088664281bSPaulo Zanoni /* We get interrupts on unclaimed registers, so check for this before we 13098664281bSPaulo Zanoni * do any I915_{READ,WRITE}. */ 13108664281bSPaulo Zanoni if (IS_HASWELL(dev) && 13118664281bSPaulo Zanoni (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { 13128664281bSPaulo Zanoni DRM_ERROR("Unclaimed register before interrupt\n"); 13138664281bSPaulo Zanoni I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); 13148664281bSPaulo Zanoni } 13158664281bSPaulo Zanoni 1316b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 1317b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 1318b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 131923a78516SPaulo Zanoni POSTING_READ(DEIER); 13200e43406bSChris Wilson 132144498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 132244498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 132344498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 132444498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 132544498aeaSPaulo Zanoni * due to its back queue). */ 1326ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 132744498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 132844498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 132944498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1330ab5c608bSBen Widawsky } 133144498aeaSPaulo Zanoni 13328664281bSPaulo Zanoni /* On Haswell, also mask ERR_INT because we don't want to risk 13338664281bSPaulo Zanoni * generating "unclaimed register" interrupts from inside the interrupt 13348664281bSPaulo Zanoni * handler.
*/ 13354bc9d430SDaniel Vetter if (IS_HASWELL(dev)) { 13364bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 13378664281bSPaulo Zanoni ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 13384bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 13394bc9d430SDaniel Vetter } 13408664281bSPaulo Zanoni 13410e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 13420e43406bSChris Wilson if (gt_iir) { 1343f1af8fc1SPaulo Zanoni if (IS_GEN5(dev)) 1344f1af8fc1SPaulo Zanoni ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1345f1af8fc1SPaulo Zanoni else 13460e43406bSChris Wilson snb_gt_irq_handler(dev, dev_priv, gt_iir); 13470e43406bSChris Wilson I915_WRITE(GTIIR, gt_iir); 13480e43406bSChris Wilson ret = IRQ_HANDLED; 13490e43406bSChris Wilson } 1350b1f14ad0SJesse Barnes 1351b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 13520e43406bSChris Wilson if (de_iir) { 1353f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) 13549719fb98SPaulo Zanoni ivb_display_irq_handler(dev, de_iir); 1355f1af8fc1SPaulo Zanoni else 1356f1af8fc1SPaulo Zanoni ilk_display_irq_handler(dev, de_iir); 13570e43406bSChris Wilson I915_WRITE(DEIIR, de_iir); 13580e43406bSChris Wilson ret = IRQ_HANDLED; 13590e43406bSChris Wilson } 13600e43406bSChris Wilson 1361f1af8fc1SPaulo Zanoni if (INTEL_INFO(dev)->gen >= 6) { 1362f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 13630e43406bSChris Wilson if (pm_iir) { 1364baf02a1fSBen Widawsky if (IS_HASWELL(dev)) 1365baf02a1fSBen Widawsky hsw_pm_irq_handler(dev_priv, pm_iir); 13664848405cSBen Widawsky else if (pm_iir & GEN6_PM_RPS_EVENTS) 1367d0ecd7e2SDaniel Vetter gen6_rps_irq_handler(dev_priv, pm_iir); 1368b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 13690e43406bSChris Wilson ret = IRQ_HANDLED; 13700e43406bSChris Wilson } 1371f1af8fc1SPaulo Zanoni } 1372b1f14ad0SJesse Barnes 13734bc9d430SDaniel Vetter if (IS_HASWELL(dev)) { 13744bc9d430SDaniel Vetter spin_lock(&dev_priv->irq_lock); 13754bc9d430SDaniel Vetter if (ivb_can_enable_err_int(dev)) 13768664281bSPaulo Zanoni ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 13774bc9d430SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 13784bc9d430SDaniel Vetter } 13798664281bSPaulo Zanoni 1380b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 1381b1f14ad0SJesse Barnes POSTING_READ(DEIER); 1382ab5c608bSBen Widawsky if (!HAS_PCH_NOP(dev)) { 138344498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 138444498aeaSPaulo Zanoni POSTING_READ(SDEIER); 1385ab5c608bSBen Widawsky } 1386b1f14ad0SJesse Barnes 1387b1f14ad0SJesse Barnes return ret; 1388b1f14ad0SJesse Barnes } 1389b1f14ad0SJesse Barnes 13908a905236SJesse Barnes /** 13918a905236SJesse Barnes * i915_error_work_func - do process context error handling work 13928a905236SJesse Barnes * @work: work struct 13938a905236SJesse Barnes * 13948a905236SJesse Barnes * Fire an error uevent so userspace can see that a hang or error 13958a905236SJesse Barnes * was detected. 
13968a905236SJesse Barnes */ 13978a905236SJesse Barnes static void i915_error_work_func(struct work_struct *work) 13988a905236SJesse Barnes { 13991f83fee0SDaniel Vetter struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 14001f83fee0SDaniel Vetter work); 14011f83fee0SDaniel Vetter drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 14021f83fee0SDaniel Vetter gpu_error); 14038a905236SJesse Barnes struct drm_device *dev = dev_priv->dev; 1404f69061beSDaniel Vetter struct intel_ring_buffer *ring; 1405f316a42cSBen Gamari char *error_event[] = { "ERROR=1", NULL }; 1406f316a42cSBen Gamari char *reset_event[] = { "RESET=1", NULL }; 1407f316a42cSBen Gamari char *reset_done_event[] = { "ERROR=0", NULL }; 1408f69061beSDaniel Vetter int i, ret; 14098a905236SJesse Barnes 1410f316a42cSBen Gamari kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 14118a905236SJesse Barnes 14127db0ba24SDaniel Vetter /* 14137db0ba24SDaniel Vetter * Note that there's only one work item which does gpu resets, so we 14147db0ba24SDaniel Vetter * need not worry about concurrent gpu resets potentially incrementing 14157db0ba24SDaniel Vetter * error->reset_counter twice. We only need to take care of another 14167db0ba24SDaniel Vetter * racing irq/hangcheck declaring the gpu dead for a second time. A 14177db0ba24SDaniel Vetter * quick check for that is good enough: schedule_work ensures the 14187db0ba24SDaniel Vetter * correct ordering between hang detection and this work item, and since 14197db0ba24SDaniel Vetter * the reset in-progress bit is only ever set by code outside of this 14207db0ba24SDaniel Vetter * work we don't need to worry about any other races. 14217db0ba24SDaniel Vetter */ 14227db0ba24SDaniel Vetter if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 142344d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 14247db0ba24SDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 14257db0ba24SDaniel Vetter reset_event); 14261f83fee0SDaniel Vetter 1427f69061beSDaniel Vetter ret = i915_reset(dev); 1428f69061beSDaniel Vetter 1429f69061beSDaniel Vetter if (ret == 0) { 1430f69061beSDaniel Vetter /* 1431f69061beSDaniel Vetter * After all the gem state is reset, increment the reset 1432f69061beSDaniel Vetter * counter and wake up everyone waiting for the reset to 1433f69061beSDaniel Vetter * complete. 1434f69061beSDaniel Vetter * 1435f69061beSDaniel Vetter * Since unlock operations are a one-sided barrier only, 1436f69061beSDaniel Vetter * we need to insert a barrier here to order any seqno 1437f69061beSDaniel Vetter * updates before 1438f69061beSDaniel Vetter * the counter increment. 
1439f69061beSDaniel Vetter */ 1440f69061beSDaniel Vetter smp_mb__before_atomic_inc(); 1441f69061beSDaniel Vetter atomic_inc(&dev_priv->gpu_error.reset_counter); 1442f69061beSDaniel Vetter 1443f69061beSDaniel Vetter kobject_uevent_env(&dev->primary->kdev.kobj, 1444f69061beSDaniel Vetter KOBJ_CHANGE, reset_done_event); 14451f83fee0SDaniel Vetter } else { 14461f83fee0SDaniel Vetter atomic_set(&error->reset_counter, I915_WEDGED); 1447f316a42cSBen Gamari } 14481f83fee0SDaniel Vetter 1449f69061beSDaniel Vetter for_each_ring(ring, dev_priv, i) 1450f69061beSDaniel Vetter wake_up_all(&ring->irq_queue); 1451f69061beSDaniel Vetter 145296a02917SVille Syrjälä intel_display_handle_reset(dev); 145396a02917SVille Syrjälä 14541f83fee0SDaniel Vetter wake_up_all(&dev_priv->gpu_error.reset_queue); 1455f316a42cSBen Gamari } 14568a905236SJesse Barnes } 14578a905236SJesse Barnes 145835aed2e6SChris Wilson static void i915_report_and_clear_eir(struct drm_device *dev) 1459c0e09200SDave Airlie { 14608a905236SJesse Barnes struct drm_i915_private *dev_priv = dev->dev_private; 1461bd9854f9SBen Widawsky uint32_t instdone[I915_NUM_INSTDONE_REG]; 146263eeaf38SJesse Barnes u32 eir = I915_READ(EIR); 1463050ee91fSBen Widawsky int pipe, i; 146463eeaf38SJesse Barnes 146535aed2e6SChris Wilson if (!eir) 146635aed2e6SChris Wilson return; 146763eeaf38SJesse Barnes 1468a70491ccSJoe Perches pr_err("render error detected, EIR: 0x%08x\n", eir); 14698a905236SJesse Barnes 1470bd9854f9SBen Widawsky i915_get_extra_instdone(dev, instdone); 1471bd9854f9SBen Widawsky 14728a905236SJesse Barnes if (IS_G4X(dev)) { 14738a905236SJesse Barnes if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 14748a905236SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 14758a905236SJesse Barnes 1476a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1477a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1478050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1479050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1480a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1481a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 14828a905236SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 14833143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 14848a905236SJesse Barnes } 14858a905236SJesse Barnes if (eir & GM45_ERROR_PAGE_TABLE) { 14868a905236SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1487a70491ccSJoe Perches pr_err("page table error\n"); 1488a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 14898a905236SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 14903143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 14918a905236SJesse Barnes } 14928a905236SJesse Barnes } 14938a905236SJesse Barnes 1494a6c45cf0SChris Wilson if (!IS_GEN2(dev)) { 149563eeaf38SJesse Barnes if (eir & I915_ERROR_PAGE_TABLE) { 149663eeaf38SJesse Barnes u32 pgtbl_err = I915_READ(PGTBL_ER); 1497a70491ccSJoe Perches pr_err("page table error\n"); 1498a70491ccSJoe Perches pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 149963eeaf38SJesse Barnes I915_WRITE(PGTBL_ER, pgtbl_err); 15003143a2bfSChris Wilson POSTING_READ(PGTBL_ER); 150163eeaf38SJesse Barnes } 15028a905236SJesse Barnes } 15038a905236SJesse Barnes 150463eeaf38SJesse Barnes if (eir & I915_ERROR_MEMORY_REFRESH) { 1505a70491ccSJoe Perches pr_err("memory refresh error:\n"); 15069db4a9c7SJesse Barnes for_each_pipe(pipe) 1507a70491ccSJoe Perches pr_err("pipe %c stat: 0x%08x\n", 15089db4a9c7SJesse Barnes pipe_name(pipe), 
I915_READ(PIPESTAT(pipe))); 150963eeaf38SJesse Barnes /* pipestat has already been acked */ 151063eeaf38SJesse Barnes } 151163eeaf38SJesse Barnes if (eir & I915_ERROR_INSTRUCTION) { 1512a70491ccSJoe Perches pr_err("instruction error\n"); 1513a70491ccSJoe Perches pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1514050ee91fSBen Widawsky for (i = 0; i < ARRAY_SIZE(instdone); i++) 1515050ee91fSBen Widawsky pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1516a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen < 4) { 151763eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR); 151863eeaf38SJesse Barnes 1519a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1520a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1521a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 152263eeaf38SJesse Barnes I915_WRITE(IPEIR, ipeir); 15233143a2bfSChris Wilson POSTING_READ(IPEIR); 152463eeaf38SJesse Barnes } else { 152563eeaf38SJesse Barnes u32 ipeir = I915_READ(IPEIR_I965); 152663eeaf38SJesse Barnes 1527a70491ccSJoe Perches pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1528a70491ccSJoe Perches pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1529a70491ccSJoe Perches pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1530a70491ccSJoe Perches pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 153163eeaf38SJesse Barnes I915_WRITE(IPEIR_I965, ipeir); 15323143a2bfSChris Wilson POSTING_READ(IPEIR_I965); 153363eeaf38SJesse Barnes } 153463eeaf38SJesse Barnes } 153563eeaf38SJesse Barnes 153663eeaf38SJesse Barnes I915_WRITE(EIR, eir); 15373143a2bfSChris Wilson POSTING_READ(EIR); 153863eeaf38SJesse Barnes eir = I915_READ(EIR); 153963eeaf38SJesse Barnes if (eir) { 154063eeaf38SJesse Barnes /* 154163eeaf38SJesse Barnes * some errors might have become stuck, 154263eeaf38SJesse Barnes * mask them. 154363eeaf38SJesse Barnes */ 154463eeaf38SJesse Barnes DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 154563eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 154663eeaf38SJesse Barnes I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 154763eeaf38SJesse Barnes } 154835aed2e6SChris Wilson } 154935aed2e6SChris Wilson 155035aed2e6SChris Wilson /** 155135aed2e6SChris Wilson * i915_handle_error - handle an error interrupt 155235aed2e6SChris Wilson * @dev: drm device 155335aed2e6SChris Wilson * 155435aed2e6SChris Wilson * Do some basic checking of register state at error interrupt time and 155535aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 155635aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 155735aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 155835aed2e6SChris Wilson * of a ring dump etc.).
155935aed2e6SChris Wilson */ 1560527f9e90SChris Wilson void i915_handle_error(struct drm_device *dev, bool wedged) 156135aed2e6SChris Wilson { 156235aed2e6SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 1563b4519513SChris Wilson struct intel_ring_buffer *ring; 1564b4519513SChris Wilson int i; 156535aed2e6SChris Wilson 156635aed2e6SChris Wilson i915_capture_error_state(dev); 156735aed2e6SChris Wilson i915_report_and_clear_eir(dev); 15688a905236SJesse Barnes 1569ba1234d1SBen Gamari if (wedged) { 1570f69061beSDaniel Vetter atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1571f69061beSDaniel Vetter &dev_priv->gpu_error.reset_counter); 1572ba1234d1SBen Gamari 157311ed50ecSBen Gamari /* 15741f83fee0SDaniel Vetter * Wakeup waiting processes so that the reset work item 15751f83fee0SDaniel Vetter * doesn't deadlock trying to grab various locks. 157611ed50ecSBen Gamari */ 1577b4519513SChris Wilson for_each_ring(ring, dev_priv, i) 1578b4519513SChris Wilson wake_up_all(&ring->irq_queue); 157911ed50ecSBen Gamari } 158011ed50ecSBen Gamari 158199584db3SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 15828a905236SJesse Barnes } 15838a905236SJesse Barnes 158421ad8330SVille Syrjälä static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 15854e5359cdSSimon Farnsworth { 15864e5359cdSSimon Farnsworth drm_i915_private_t *dev_priv = dev->dev_private; 15874e5359cdSSimon Farnsworth struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 15884e5359cdSSimon Farnsworth struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 158905394f39SChris Wilson struct drm_i915_gem_object *obj; 15904e5359cdSSimon Farnsworth struct intel_unpin_work *work; 15914e5359cdSSimon Farnsworth unsigned long flags; 15924e5359cdSSimon Farnsworth bool stall_detected; 15934e5359cdSSimon Farnsworth 15944e5359cdSSimon Farnsworth /* Ignore early vblank irqs */ 15954e5359cdSSimon Farnsworth if (intel_crtc == NULL) 15964e5359cdSSimon Farnsworth return; 15974e5359cdSSimon Farnsworth 15984e5359cdSSimon Farnsworth spin_lock_irqsave(&dev->event_lock, flags); 15994e5359cdSSimon Farnsworth work = intel_crtc->unpin_work; 16004e5359cdSSimon Farnsworth 1601e7d841caSChris Wilson if (work == NULL || 1602e7d841caSChris Wilson atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1603e7d841caSChris Wilson !work->enable_stall_check) { 16044e5359cdSSimon Farnsworth /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 16054e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 16064e5359cdSSimon Farnsworth return; 16074e5359cdSSimon Farnsworth } 16084e5359cdSSimon Farnsworth 16094e5359cdSSimon Farnsworth /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 161005394f39SChris Wilson obj = work->pending_flip_obj; 1611a6c45cf0SChris Wilson if (INTEL_INFO(dev)->gen >= 4) { 16129db4a9c7SJesse Barnes int dspsurf = DSPSURF(intel_crtc->plane); 1613446f2545SArmin Reese stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1614f343c5f6SBen Widawsky i915_gem_obj_ggtt_offset(obj); 16154e5359cdSSimon Farnsworth } else { 16169db4a9c7SJesse Barnes int dspaddr = DSPADDR(intel_crtc->plane); 1617f343c5f6SBen Widawsky stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 161801f2c773SVille Syrjälä crtc->y * crtc->fb->pitches[0] + 16194e5359cdSSimon Farnsworth crtc->x * crtc->fb->bits_per_pixel/8); 16204e5359cdSSimon Farnsworth } 16214e5359cdSSimon Farnsworth 16224e5359cdSSimon Farnsworth spin_unlock_irqrestore(&dev->event_lock, flags); 16234e5359cdSSimon Farnsworth 16244e5359cdSSimon Farnsworth if (stall_detected) { 16254e5359cdSSimon Farnsworth DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 16264e5359cdSSimon Farnsworth intel_prepare_page_flip(dev, intel_crtc->plane); 16274e5359cdSSimon Farnsworth } 16284e5359cdSSimon Farnsworth } 16294e5359cdSSimon Farnsworth 163042f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 163142f52ef8SKeith Packard * we use as a pipe index 163242f52ef8SKeith Packard */ 1633f71d4af4SJesse Barnes static int i915_enable_vblank(struct drm_device *dev, int pipe) 16340a3e67a4SJesse Barnes { 16350a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1636e9d21d7fSKeith Packard unsigned long irqflags; 163771e0ffa5SJesse Barnes 16385eddb70bSChris Wilson if (!i915_pipe_enabled(dev, pipe)) 163971e0ffa5SJesse Barnes return -EINVAL; 16400a3e67a4SJesse Barnes 16411ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1642f796cf8fSJesse Barnes if (INTEL_INFO(dev)->gen >= 4) 16437c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 16447c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 16450a3e67a4SJesse Barnes else 16467c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 16477c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE); 16488692d00eSChris Wilson 16498692d00eSChris Wilson /* maintain vblank delivery even in deep C-states */ 16508692d00eSChris Wilson if (dev_priv->info->gen == 3) 16516b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 16521ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 16538692d00eSChris Wilson 16540a3e67a4SJesse Barnes return 0; 16550a3e67a4SJesse Barnes } 16560a3e67a4SJesse Barnes 1657f71d4af4SJesse Barnes static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1658f796cf8fSJesse Barnes { 1659f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1660f796cf8fSJesse Barnes unsigned long irqflags; 1661b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1662b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1663f796cf8fSJesse Barnes 1664f796cf8fSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 1665f796cf8fSJesse Barnes return -EINVAL; 1666f796cf8fSJesse Barnes 1667f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1668b518421fSPaulo Zanoni ironlake_enable_display_irq(dev_priv, bit); 1669b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1670b1f14ad0SJesse Barnes 1671b1f14ad0SJesse Barnes return 0; 1672b1f14ad0SJesse Barnes } 1673b1f14ad0SJesse Barnes 16747e231dbeSJesse Barnes static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 16757e231dbeSJesse Barnes { 16767e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 16777e231dbeSJesse Barnes unsigned long irqflags; 167831acc7f5SJesse Barnes u32 imr; 16797e231dbeSJesse Barnes 16807e231dbeSJesse Barnes if (!i915_pipe_enabled(dev, pipe)) 16817e231dbeSJesse Barnes return -EINVAL; 16827e231dbeSJesse Barnes 16837e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 16847e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 168531acc7f5SJesse Barnes if (pipe == 0) 16867e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 168731acc7f5SJesse Barnes else 16887e231dbeSJesse Barnes imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 16897e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 169031acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, pipe, 169131acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 16927e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 16937e231dbeSJesse Barnes 16947e231dbeSJesse Barnes return 0; 16957e231dbeSJesse Barnes } 16967e231dbeSJesse Barnes 169742f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 169842f52ef8SKeith Packard * we use as a pipe index 169942f52ef8SKeith Packard */ 1700f71d4af4SJesse Barnes static void i915_disable_vblank(struct drm_device *dev, int pipe) 17010a3e67a4SJesse Barnes { 17020a3e67a4SJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1703e9d21d7fSKeith Packard unsigned long irqflags; 17040a3e67a4SJesse Barnes 17051ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 17068692d00eSChris Wilson if (dev_priv->info->gen == 3) 17076b26c86dSDaniel Vetter I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 17088692d00eSChris Wilson 17097c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 17107c463586SKeith Packard PIPE_VBLANK_INTERRUPT_ENABLE | 17117c463586SKeith Packard PIPE_START_VBLANK_INTERRUPT_ENABLE); 17121ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 17130a3e67a4SJesse Barnes } 17140a3e67a4SJesse Barnes 1715f71d4af4SJesse Barnes static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1716f796cf8fSJesse Barnes { 1717f796cf8fSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1718f796cf8fSJesse Barnes unsigned long irqflags; 1719b518421fSPaulo Zanoni uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 1720b518421fSPaulo Zanoni DE_PIPE_VBLANK_ILK(pipe); 1721f796cf8fSJesse Barnes 1722f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1723b518421fSPaulo Zanoni ironlake_disable_display_irq(dev_priv, bit); 1724b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1725b1f14ad0SJesse Barnes } 1726b1f14ad0SJesse Barnes 17277e231dbeSJesse Barnes static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 17287e231dbeSJesse Barnes { 17297e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 17307e231dbeSJesse Barnes unsigned long irqflags; 173131acc7f5SJesse Barnes u32 imr; 17327e231dbeSJesse Barnes 17337e231dbeSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 173431acc7f5SJesse Barnes i915_disable_pipestat(dev_priv, pipe, 173531acc7f5SJesse Barnes PIPE_START_VBLANK_INTERRUPT_ENABLE); 17367e231dbeSJesse Barnes imr = I915_READ(VLV_IMR); 173731acc7f5SJesse Barnes if (pipe == 0) 17387e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 173931acc7f5SJesse Barnes else 17407e231dbeSJesse Barnes imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 17417e231dbeSJesse Barnes I915_WRITE(VLV_IMR, imr); 17427e231dbeSJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 17437e231dbeSJesse Barnes } 17447e231dbeSJesse Barnes 1745893eead0SChris Wilson static u32 1746893eead0SChris Wilson ring_last_seqno(struct intel_ring_buffer *ring) 1747852835f3SZou Nan hai { 1748893eead0SChris Wilson return list_entry(ring->request_list.prev, 1749893eead0SChris Wilson struct drm_i915_gem_request, list)->seqno; 1750893eead0SChris Wilson } 1751893eead0SChris Wilson 17529107e9d2SChris Wilson static bool 17539107e9d2SChris Wilson ring_idle(struct intel_ring_buffer *ring, u32 seqno) 1754893eead0SChris Wilson { 17559107e9d2SChris Wilson return (list_empty(&ring->request_list) || 17569107e9d2SChris Wilson i915_seqno_passed(seqno, ring_last_seqno(ring))); 1757f65d9421SBen Gamari } 1758f65d9421SBen Gamari 17596274f212SChris Wilson static struct intel_ring_buffer * 17606274f212SChris Wilson semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 1761a24a11e6SChris Wilson { 1762a24a11e6SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 17636274f212SChris Wilson u32 cmd, ipehr, acthd, acthd_min; 1764a24a11e6SChris Wilson 1765a24a11e6SChris Wilson ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 1766a24a11e6SChris Wilson if ((ipehr & ~(0x3 << 16)) != 1767a24a11e6SChris Wilson (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 17686274f212SChris Wilson return NULL; 1769a24a11e6SChris Wilson 1770a24a11e6SChris Wilson /* ACTHD is likely pointing to the dword after the actual command, 1771a24a11e6SChris Wilson * so scan backwards until we find the MBOX. 
1772a24a11e6SChris Wilson */ 17736274f212SChris Wilson acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 1774a24a11e6SChris Wilson acthd_min = max((int)acthd - 3 * 4, 0); 1775a24a11e6SChris Wilson do { 1776a24a11e6SChris Wilson cmd = ioread32(ring->virtual_start + acthd); 1777a24a11e6SChris Wilson if (cmd == ipehr) 1778a24a11e6SChris Wilson break; 1779a24a11e6SChris Wilson 1780a24a11e6SChris Wilson acthd -= 4; 1781a24a11e6SChris Wilson if (acthd < acthd_min) 17826274f212SChris Wilson return NULL; 1783a24a11e6SChris Wilson } while (1); 1784a24a11e6SChris Wilson 17856274f212SChris Wilson *seqno = ioread32(ring->virtual_start+acthd+4)+1; 17866274f212SChris Wilson return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 1787a24a11e6SChris Wilson } 1788a24a11e6SChris Wilson 17896274f212SChris Wilson static int semaphore_passed(struct intel_ring_buffer *ring) 17906274f212SChris Wilson { 17916274f212SChris Wilson struct drm_i915_private *dev_priv = ring->dev->dev_private; 17926274f212SChris Wilson struct intel_ring_buffer *signaller; 17936274f212SChris Wilson u32 seqno, ctl; 17946274f212SChris Wilson 17956274f212SChris Wilson ring->hangcheck.deadlock = true; 17966274f212SChris Wilson 17976274f212SChris Wilson signaller = semaphore_waits_for(ring, &seqno); 17986274f212SChris Wilson if (signaller == NULL || signaller->hangcheck.deadlock) 17996274f212SChris Wilson return -1; 18006274f212SChris Wilson 18016274f212SChris Wilson /* cursory check for an unkickable deadlock */ 18026274f212SChris Wilson ctl = I915_READ_CTL(signaller); 18036274f212SChris Wilson if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 18046274f212SChris Wilson return -1; 18056274f212SChris Wilson 18066274f212SChris Wilson return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 18076274f212SChris Wilson } 18086274f212SChris Wilson 18096274f212SChris Wilson static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 18106274f212SChris Wilson { 18116274f212SChris Wilson struct intel_ring_buffer *ring; 18126274f212SChris Wilson int i; 18136274f212SChris Wilson 18146274f212SChris Wilson for_each_ring(ring, dev_priv, i) 18156274f212SChris Wilson ring->hangcheck.deadlock = false; 18166274f212SChris Wilson } 18176274f212SChris Wilson 1818ad8beaeaSMika Kuoppala static enum intel_ring_hangcheck_action 1819ad8beaeaSMika Kuoppala ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 18201ec14ad3SChris Wilson { 18211ec14ad3SChris Wilson struct drm_device *dev = ring->dev; 18221ec14ad3SChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 18239107e9d2SChris Wilson u32 tmp; 18249107e9d2SChris Wilson 18256274f212SChris Wilson if (ring->hangcheck.acthd != acthd) 18266274f212SChris Wilson return active; 18276274f212SChris Wilson 18289107e9d2SChris Wilson if (IS_GEN2(dev)) 18296274f212SChris Wilson return hung; 18309107e9d2SChris Wilson 18319107e9d2SChris Wilson /* Is the chip hanging on a WAIT_FOR_EVENT? 18329107e9d2SChris Wilson * If so we can simply poke the RB_WAIT bit 18339107e9d2SChris Wilson * and break the hang. This should work on 18349107e9d2SChris Wilson * all but the second generation chipsets. 
18359107e9d2SChris Wilson */ 18369107e9d2SChris Wilson tmp = I915_READ_CTL(ring); 18371ec14ad3SChris Wilson if (tmp & RING_WAIT) { 18381ec14ad3SChris Wilson DRM_ERROR("Kicking stuck wait on %s\n", 18391ec14ad3SChris Wilson ring->name); 18401ec14ad3SChris Wilson I915_WRITE_CTL(ring, tmp); 18416274f212SChris Wilson return kick; 18421ec14ad3SChris Wilson } 1843a24a11e6SChris Wilson 18446274f212SChris Wilson if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 18456274f212SChris Wilson switch (semaphore_passed(ring)) { 18466274f212SChris Wilson default: 18476274f212SChris Wilson return hung; 18486274f212SChris Wilson case 1: 1849a24a11e6SChris Wilson DRM_ERROR("Kicking stuck semaphore on %s\n", 1850a24a11e6SChris Wilson ring->name); 1851a24a11e6SChris Wilson I915_WRITE_CTL(ring, tmp); 18526274f212SChris Wilson return kick; 18536274f212SChris Wilson case 0: 18546274f212SChris Wilson return wait; 18556274f212SChris Wilson } 18569107e9d2SChris Wilson } 18579107e9d2SChris Wilson 18586274f212SChris Wilson return hung; 1859a24a11e6SChris Wilson } 1860d1e61e7fSChris Wilson 1861f65d9421SBen Gamari /** 1862f65d9421SBen Gamari * This is called when the chip hasn't reported back with completed 186305407ff8SMika Kuoppala * batchbuffers in a long time. We keep track of per-ring seqno progress and 186405407ff8SMika Kuoppala * if there is no progress, the hangcheck score for that ring is increased. 186505407ff8SMika Kuoppala * Further, acthd is inspected to see if the ring is stuck. If it is stuck 186605407ff8SMika Kuoppala * we kick the ring. If we see no progress on three subsequent calls 186705407ff8SMika Kuoppala * we assume the chip is wedged and try to fix it by resetting the chip. 1868f65d9421SBen Gamari */ 1869f65d9421SBen Gamari void i915_hangcheck_elapsed(unsigned long data) 1870f65d9421SBen Gamari { 1871f65d9421SBen Gamari struct drm_device *dev = (struct drm_device *)data; 1872f65d9421SBen Gamari drm_i915_private_t *dev_priv = dev->dev_private; 1873b4519513SChris Wilson struct intel_ring_buffer *ring; 1874b4519513SChris Wilson int i; 187505407ff8SMika Kuoppala int busy_count = 0, rings_hung = 0; 18769107e9d2SChris Wilson bool stuck[I915_NUM_RINGS] = { 0 }; 18779107e9d2SChris Wilson #define BUSY 1 18789107e9d2SChris Wilson #define KICK 5 18799107e9d2SChris Wilson #define HUNG 20 18809107e9d2SChris Wilson #define FIRE 30 1881893eead0SChris Wilson 18823e0dc6b0SBen Widawsky if (!i915_enable_hangcheck) 18833e0dc6b0SBen Widawsky return; 18843e0dc6b0SBen Widawsky 1885b4519513SChris Wilson for_each_ring(ring, dev_priv, i) { 188605407ff8SMika Kuoppala u32 seqno, acthd; 18879107e9d2SChris Wilson bool busy = true; 1888b4519513SChris Wilson 18896274f212SChris Wilson semaphore_clear_deadlocks(dev_priv); 18906274f212SChris Wilson 189105407ff8SMika Kuoppala seqno = ring->get_seqno(ring, false); 189205407ff8SMika Kuoppala acthd = intel_ring_get_active_head(ring); 189305407ff8SMika Kuoppala 189405407ff8SMika Kuoppala if (ring->hangcheck.seqno == seqno) { 18959107e9d2SChris Wilson if (ring_idle(ring, seqno)) { 18969107e9d2SChris Wilson if (waitqueue_active(&ring->irq_queue)) { 18979107e9d2SChris Wilson /* Issue a wake-up to catch stuck h/w. */ 18989107e9d2SChris Wilson DRM_ERROR("Hangcheck timer elapsed...
%s idle\n", 18999107e9d2SChris Wilson ring->name); 19009107e9d2SChris Wilson wake_up_all(&ring->irq_queue); 19019107e9d2SChris Wilson ring->hangcheck.score += HUNG; 19029107e9d2SChris Wilson } else 19039107e9d2SChris Wilson busy = false; 190405407ff8SMika Kuoppala } else { 19059107e9d2SChris Wilson int score; 19069107e9d2SChris Wilson 19076274f212SChris Wilson /* We always increment the hangcheck score 19086274f212SChris Wilson * if the ring is busy and still processing 19096274f212SChris Wilson * the same request, so that no single request 19106274f212SChris Wilson * can run indefinitely (such as a chain of 19116274f212SChris Wilson * batches). The only time we do not increment 19126274f212SChris Wilson * the hangcheck score on this ring is if this 19136274f212SChris Wilson * ring is in a legitimate wait for another 19146274f212SChris Wilson * ring. In that case the waiting ring is a 19156274f212SChris Wilson * victim and we want to be sure we catch the 19166274f212SChris Wilson * right culprit. Then every time we do kick 19176274f212SChris Wilson * the ring, we add a small increment to the 19186274f212SChris Wilson * score so that we can catch a batch that is 19196274f212SChris Wilson * being repeatedly kicked and so responsible 19206274f212SChris Wilson * for stalling the machine. 19219107e9d2SChris Wilson */ 1922ad8beaeaSMika Kuoppala ring->hangcheck.action = ring_stuck(ring, 1923ad8beaeaSMika Kuoppala acthd); 1924ad8beaeaSMika Kuoppala 1925ad8beaeaSMika Kuoppala switch (ring->hangcheck.action) { 19266274f212SChris Wilson case wait: 19276274f212SChris Wilson score = 0; 19286274f212SChris Wilson break; 19296274f212SChris Wilson case active: 19309107e9d2SChris Wilson score = BUSY; 19316274f212SChris Wilson break; 19326274f212SChris Wilson case kick: 19336274f212SChris Wilson score = KICK; 19346274f212SChris Wilson break; 19356274f212SChris Wilson case hung: 19366274f212SChris Wilson score = HUNG; 19376274f212SChris Wilson stuck[i] = true; 19386274f212SChris Wilson break; 19396274f212SChris Wilson } 19409107e9d2SChris Wilson ring->hangcheck.score += score; 194105407ff8SMika Kuoppala } 19429107e9d2SChris Wilson } else { 19439107e9d2SChris Wilson /* Gradually reduce the count so that we catch DoS 19449107e9d2SChris Wilson * attempts across multiple batches. 19459107e9d2SChris Wilson */ 19469107e9d2SChris Wilson if (ring->hangcheck.score > 0) 19479107e9d2SChris Wilson ring->hangcheck.score--; 1948cbb465e7SChris Wilson } 1949f65d9421SBen Gamari 195005407ff8SMika Kuoppala ring->hangcheck.seqno = seqno; 195105407ff8SMika Kuoppala ring->hangcheck.acthd = acthd; 19529107e9d2SChris Wilson busy_count += busy; 195305407ff8SMika Kuoppala } 195405407ff8SMika Kuoppala 195505407ff8SMika Kuoppala for_each_ring(ring, dev_priv, i) { 19569107e9d2SChris Wilson if (ring->hangcheck.score > FIRE) { 1957acd78c11SBen Widawsky DRM_ERROR("%s on %s\n", 195805407ff8SMika Kuoppala stuck[i] ?
"stuck" : "no progress", 1959a43adf07SChris Wilson ring->name); 1960a43adf07SChris Wilson rings_hung++; 196105407ff8SMika Kuoppala } 196205407ff8SMika Kuoppala } 196305407ff8SMika Kuoppala 196405407ff8SMika Kuoppala if (rings_hung) 196505407ff8SMika Kuoppala return i915_handle_error(dev, true); 196605407ff8SMika Kuoppala 196705407ff8SMika Kuoppala if (busy_count) 196805407ff8SMika Kuoppala /* Reset timer in case chip hangs without another request 196905407ff8SMika Kuoppala * being added */ 197010cd45b6SMika Kuoppala i915_queue_hangcheck(dev); 197110cd45b6SMika Kuoppala } 197210cd45b6SMika Kuoppala 197310cd45b6SMika Kuoppala void i915_queue_hangcheck(struct drm_device *dev) 197410cd45b6SMika Kuoppala { 197510cd45b6SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 197610cd45b6SMika Kuoppala if (!i915_enable_hangcheck) 197710cd45b6SMika Kuoppala return; 197810cd45b6SMika Kuoppala 197999584db3SDaniel Vetter mod_timer(&dev_priv->gpu_error.hangcheck_timer, 198010cd45b6SMika Kuoppala round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1981f65d9421SBen Gamari } 1982f65d9421SBen Gamari 198391738a95SPaulo Zanoni static void ibx_irq_preinstall(struct drm_device *dev) 198491738a95SPaulo Zanoni { 198591738a95SPaulo Zanoni struct drm_i915_private *dev_priv = dev->dev_private; 198691738a95SPaulo Zanoni 198791738a95SPaulo Zanoni if (HAS_PCH_NOP(dev)) 198891738a95SPaulo Zanoni return; 198991738a95SPaulo Zanoni 199091738a95SPaulo Zanoni /* south display irq */ 199191738a95SPaulo Zanoni I915_WRITE(SDEIMR, 0xffffffff); 199291738a95SPaulo Zanoni /* 199391738a95SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed 199491738a95SPaulo Zanoni * PCH interrupts. Hence we can't update it after the interrupt handler 199591738a95SPaulo Zanoni * is enabled - instead we unconditionally enable all PCH interrupt 199691738a95SPaulo Zanoni * sources here, but then only unmask them as needed with SDEIMR.
199791738a95SPaulo Zanoni */ 199891738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 199991738a95SPaulo Zanoni POSTING_READ(SDEIER); 200091738a95SPaulo Zanoni } 200191738a95SPaulo Zanoni 2002d18ea1b5SDaniel Vetter static void gen5_gt_irq_preinstall(struct drm_device *dev) 2003d18ea1b5SDaniel Vetter { 2004d18ea1b5SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 2005d18ea1b5SDaniel Vetter 2006d18ea1b5SDaniel Vetter /* and GT */ 2007d18ea1b5SDaniel Vetter I915_WRITE(GTIMR, 0xffffffff); 2008d18ea1b5SDaniel Vetter I915_WRITE(GTIER, 0x0); 2009d18ea1b5SDaniel Vetter POSTING_READ(GTIER); 2010d18ea1b5SDaniel Vetter 2011d18ea1b5SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 2012d18ea1b5SDaniel Vetter /* and PM */ 2013d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 2014d18ea1b5SDaniel Vetter I915_WRITE(GEN6_PMIER, 0x0); 2015d18ea1b5SDaniel Vetter POSTING_READ(GEN6_PMIER); 2016d18ea1b5SDaniel Vetter } 2017d18ea1b5SDaniel Vetter } 2018d18ea1b5SDaniel Vetter 2019c0e09200SDave Airlie /* drm_dma.h hooks 2020c0e09200SDave Airlie */ 2021f71d4af4SJesse Barnes static void ironlake_irq_preinstall(struct drm_device *dev) 2022036a4a7dSZhenyu Wang { 2023036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2024036a4a7dSZhenyu Wang 20254697995bSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 20264697995bSJesse Barnes 2027036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xeffe); 2028bdfcdb63SDaniel Vetter 2029036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2030036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 20313143a2bfSChris Wilson POSTING_READ(DEIER); 2032036a4a7dSZhenyu Wang 2033d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 2034c650156aSZhenyu Wang 203591738a95SPaulo Zanoni ibx_irq_preinstall(dev); 20367d99163dSBen Widawsky } 20377d99163dSBen Widawsky 20387e231dbeSJesse Barnes static void valleyview_irq_preinstall(struct drm_device *dev) 20397e231dbeSJesse Barnes { 20407e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 20417e231dbeSJesse Barnes int pipe; 20427e231dbeSJesse Barnes 20437e231dbeSJesse Barnes atomic_set(&dev_priv->irq_received, 0); 20447e231dbeSJesse Barnes 20457e231dbeSJesse Barnes /* VLV magic */ 20467e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0); 20477e231dbeSJesse Barnes I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 20487e231dbeSJesse Barnes I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 20497e231dbeSJesse Barnes I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 20507e231dbeSJesse Barnes 20517e231dbeSJesse Barnes /* and GT */ 20527e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 20537e231dbeSJesse Barnes I915_WRITE(GTIIR, I915_READ(GTIIR)); 2054d18ea1b5SDaniel Vetter 2055d18ea1b5SDaniel Vetter gen5_gt_irq_preinstall(dev); 20567e231dbeSJesse Barnes 20577e231dbeSJesse Barnes I915_WRITE(DPINVGTT, 0xff); 20587e231dbeSJesse Barnes 20597e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 20607e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 20617e231dbeSJesse Barnes for_each_pipe(pipe) 20627e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 20637e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 20647e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 20657e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 20667e231dbeSJesse Barnes POSTING_READ(VLV_IER); 20677e231dbeSJesse Barnes } 20687e231dbeSJesse Barnes 206982a28bcfSDaniel Vetter static void ibx_hpd_irq_setup(struct drm_device *dev) 207082a28bcfSDaniel Vetter { 207182a28bcfSDaniel Vetter 
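/* Build the set of SDE hotplug interrupts to unmask from the per-pin HPD enable state; IBX and CPT use different hotplug bit layouts. */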
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 207282a28bcfSDaniel Vetter struct drm_mode_config *mode_config = &dev->mode_config; 207382a28bcfSDaniel Vetter struct intel_encoder *intel_encoder; 2074fee884edSDaniel Vetter u32 hotplug_irqs, hotplug, enabled_irqs = 0; 207582a28bcfSDaniel Vetter 207682a28bcfSDaniel Vetter if (HAS_PCH_IBX(dev)) { 2077fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 207882a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2079cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2080fee884edSDaniel Vetter enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 208182a28bcfSDaniel Vetter } else { 2082fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 208382a28bcfSDaniel Vetter list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2084cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2085fee884edSDaniel Vetter enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 208682a28bcfSDaniel Vetter } 208782a28bcfSDaniel Vetter 2088fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 208982a28bcfSDaniel Vetter 20907fe0b973SKeith Packard /* 20917fe0b973SKeith Packard * Enable digital hotplug on the PCH, and configure the DP short pulse 20927fe0b973SKeith Packard * duration to 2ms (which is the minimum in the Display Port spec) 20937fe0b973SKeith Packard * 20947fe0b973SKeith Packard * This register is the same on all known PCH chips. 20957fe0b973SKeith Packard */ 20967fe0b973SKeith Packard hotplug = I915_READ(PCH_PORT_HOTPLUG); 20977fe0b973SKeith Packard hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 20987fe0b973SKeith Packard hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 20997fe0b973SKeith Packard hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 21007fe0b973SKeith Packard hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 21017fe0b973SKeith Packard I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 21027fe0b973SKeith Packard } 21037fe0b973SKeith Packard 2104d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 2105d46da437SPaulo Zanoni { 2106d46da437SPaulo Zanoni drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 210782a28bcfSDaniel Vetter u32 mask; 2108d46da437SPaulo Zanoni 2109692a04cfSDaniel Vetter if (HAS_PCH_NOP(dev)) 2110692a04cfSDaniel Vetter return; 2111692a04cfSDaniel Vetter 21128664281bSPaulo Zanoni if (HAS_PCH_IBX(dev)) { 21138664281bSPaulo Zanoni mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2114de032bf4SPaulo Zanoni SDE_TRANSA_FIFO_UNDER | SDE_POISON; 21158664281bSPaulo Zanoni } else { 21168664281bSPaulo Zanoni mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 21178664281bSPaulo Zanoni 21188664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 21198664281bSPaulo Zanoni } 2120ab5c608bSBen Widawsky 2121d46da437SPaulo Zanoni I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2122d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 2123d46da437SPaulo Zanoni } 2124d46da437SPaulo Zanoni 21250a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 21260a9a8c91SDaniel Vetter { 21270a9a8c91SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 21280a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 21290a9a8c91SDaniel Vetter 21300a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 21310a9a8c91SDaniel Vetter 
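/* Build the GT interrupt enables (render/BSD/BLT user interrupts, plus L3 parity where present) and, on gen6+, the PM enables for RPS events and the VEBOX user interrupt. */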
21320a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 21330a9a8c91SDaniel Vetter if (HAS_L3_GPU_CACHE(dev)) { 21340a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. */ 21350a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 21360a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 21370a9a8c91SDaniel Vetter } 21380a9a8c91SDaniel Vetter 21390a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 21400a9a8c91SDaniel Vetter if (IS_GEN5(dev)) { 21410a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 21420a9a8c91SDaniel Vetter ILK_BSD_USER_INTERRUPT; 21430a9a8c91SDaniel Vetter } else { 21440a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 21450a9a8c91SDaniel Vetter } 21460a9a8c91SDaniel Vetter 21470a9a8c91SDaniel Vetter I915_WRITE(GTIIR, I915_READ(GTIIR)); 21480a9a8c91SDaniel Vetter I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 21490a9a8c91SDaniel Vetter I915_WRITE(GTIER, gt_irqs); 21500a9a8c91SDaniel Vetter POSTING_READ(GTIER); 21510a9a8c91SDaniel Vetter 21520a9a8c91SDaniel Vetter if (INTEL_INFO(dev)->gen >= 6) { 21530a9a8c91SDaniel Vetter pm_irqs |= GEN6_PM_RPS_EVENTS; 21540a9a8c91SDaniel Vetter 21550a9a8c91SDaniel Vetter if (HAS_VEBOX(dev)) 21560a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 21570a9a8c91SDaniel Vetter 21580a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 21590a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIMR, 0xffffffff); 21600a9a8c91SDaniel Vetter I915_WRITE(GEN6_PMIER, pm_irqs); 21610a9a8c91SDaniel Vetter POSTING_READ(GEN6_PMIER); 21620a9a8c91SDaniel Vetter } 21630a9a8c91SDaniel Vetter } 21640a9a8c91SDaniel Vetter 2165f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 2166036a4a7dSZhenyu Wang { 21674bc9d430SDaniel Vetter unsigned long irqflags; 2168036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2169*8e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 2170*8e76f8dcSPaulo Zanoni 2171*8e76f8dcSPaulo Zanoni if (INTEL_INFO(dev)->gen >= 7) { 2172*8e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2173*8e76f8dcSPaulo Zanoni DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2174*8e76f8dcSPaulo Zanoni DE_PLANEB_FLIP_DONE_IVB | 2175*8e76f8dcSPaulo Zanoni DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2176*8e76f8dcSPaulo Zanoni DE_ERR_INT_IVB); 2177*8e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2178*8e76f8dcSPaulo Zanoni DE_PIPEA_VBLANK_IVB); 2179*8e76f8dcSPaulo Zanoni 2180*8e76f8dcSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2181*8e76f8dcSPaulo Zanoni } else { 2182*8e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2183ce99c256SDaniel Vetter DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 21848664281bSPaulo Zanoni DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | 2185*8e76f8dcSPaulo Zanoni DE_PIPEA_FIFO_UNDERRUN | DE_POISON); 2186*8e76f8dcSPaulo Zanoni extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2187*8e76f8dcSPaulo Zanoni } 2188036a4a7dSZhenyu Wang 21891ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 2190036a4a7dSZhenyu Wang 2191036a4a7dSZhenyu Wang /* should always can generate irq */ 2192036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 21931ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 2194*8e76f8dcSPaulo Zanoni I915_WRITE(DEIER, display_mask | extra_mask); 21953143a2bfSChris Wilson POSTING_READ(DEIER); 
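/* Display interrupts are now unmasked; bring up the GT/PM interrupts next and then the PCH (south display) interrupts. */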
2196036a4a7dSZhenyu Wang 21970a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 2198036a4a7dSZhenyu Wang 2199d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 22007fe0b973SKeith Packard 2201f97108d1SJesse Barnes if (IS_IRONLAKE_M(dev)) { 22026005ce42SDaniel Vetter /* Enable PCU event interrupts 22036005ce42SDaniel Vetter * 22046005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 22054bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 22064bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 22074bc9d430SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2208f97108d1SJesse Barnes ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 22094bc9d430SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2210f97108d1SJesse Barnes } 2211f97108d1SJesse Barnes 2212036a4a7dSZhenyu Wang return 0; 2213036a4a7dSZhenyu Wang } 2214036a4a7dSZhenyu Wang 22157e231dbeSJesse Barnes static int valleyview_irq_postinstall(struct drm_device *dev) 22167e231dbeSJesse Barnes { 22177e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22187e231dbeSJesse Barnes u32 enable_mask; 221931acc7f5SJesse Barnes u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2220b79480baSDaniel Vetter unsigned long irqflags; 22217e231dbeSJesse Barnes 22227e231dbeSJesse Barnes enable_mask = I915_DISPLAY_PORT_INTERRUPT; 222331acc7f5SJesse Barnes enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 222431acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 222531acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 22267e231dbeSJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22277e231dbeSJesse Barnes 222831acc7f5SJesse Barnes /* 222931acc7f5SJesse Barnes *Leave vblank interrupts masked initially. enable/disable will 223031acc7f5SJesse Barnes * toggle them based on usage. 223131acc7f5SJesse Barnes */ 223231acc7f5SJesse Barnes dev_priv->irq_mask = (~enable_mask) | 223331acc7f5SJesse Barnes I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 223431acc7f5SJesse Barnes I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 22357e231dbeSJesse Barnes 223620afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 223720afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 223820afbda2SDaniel Vetter 22397e231dbeSJesse Barnes I915_WRITE(VLV_IMR, dev_priv->irq_mask); 22407e231dbeSJesse Barnes I915_WRITE(VLV_IER, enable_mask); 22417e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 22427e231dbeSJesse Barnes I915_WRITE(PIPESTAT(0), 0xffff); 22437e231dbeSJesse Barnes I915_WRITE(PIPESTAT(1), 0xffff); 22447e231dbeSJesse Barnes POSTING_READ(VLV_IER); 22457e231dbeSJesse Barnes 2246b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2247b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 2248b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 224931acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2250515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 225131acc7f5SJesse Barnes i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2252b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 225331acc7f5SJesse Barnes 22547e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 22557e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 22567e231dbeSJesse Barnes 22570a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 22587e231dbeSJesse Barnes 22597e231dbeSJesse Barnes /* ack & enable invalid PTE error interrupts */ 22607e231dbeSJesse Barnes #if 0 /* FIXME: add support to irq handler for checking these bits */ 22617e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 22627e231dbeSJesse Barnes I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 22637e231dbeSJesse Barnes #endif 22647e231dbeSJesse Barnes 22657e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 226620afbda2SDaniel Vetter 226720afbda2SDaniel Vetter return 0; 226820afbda2SDaniel Vetter } 226920afbda2SDaniel Vetter 22707e231dbeSJesse Barnes static void valleyview_irq_uninstall(struct drm_device *dev) 22717e231dbeSJesse Barnes { 22727e231dbeSJesse Barnes drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22737e231dbeSJesse Barnes int pipe; 22747e231dbeSJesse Barnes 22757e231dbeSJesse Barnes if (!dev_priv) 22767e231dbeSJesse Barnes return; 22777e231dbeSJesse Barnes 2278ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2279ac4c16c5SEgbert Eich 22807e231dbeSJesse Barnes for_each_pipe(pipe) 22817e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 22827e231dbeSJesse Barnes 22837e231dbeSJesse Barnes I915_WRITE(HWSTAM, 0xffffffff); 22847e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_EN, 0); 22857e231dbeSJesse Barnes I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 22867e231dbeSJesse Barnes for_each_pipe(pipe) 22877e231dbeSJesse Barnes I915_WRITE(PIPESTAT(pipe), 0xffff); 22887e231dbeSJesse Barnes I915_WRITE(VLV_IIR, 0xffffffff); 22897e231dbeSJesse Barnes I915_WRITE(VLV_IMR, 0xffffffff); 22907e231dbeSJesse Barnes I915_WRITE(VLV_IER, 0x0); 22917e231dbeSJesse Barnes POSTING_READ(VLV_IER); 22927e231dbeSJesse Barnes } 22937e231dbeSJesse Barnes 2294f71d4af4SJesse Barnes static void ironlake_irq_uninstall(struct drm_device *dev) 2295036a4a7dSZhenyu Wang { 2296036a4a7dSZhenyu Wang drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 22974697995bSJesse Barnes 22984697995bSJesse Barnes if (!dev_priv) 22994697995bSJesse Barnes return; 23004697995bSJesse Barnes 2301ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2302ac4c16c5SEgbert Eich 2303036a4a7dSZhenyu Wang I915_WRITE(HWSTAM, 0xffffffff); 2304036a4a7dSZhenyu Wang 2305036a4a7dSZhenyu Wang I915_WRITE(DEIMR, 0xffffffff); 2306036a4a7dSZhenyu Wang I915_WRITE(DEIER, 0x0); 2307036a4a7dSZhenyu Wang I915_WRITE(DEIIR, I915_READ(DEIIR)); 23088664281bSPaulo Zanoni if (IS_GEN7(dev)) 23098664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2310036a4a7dSZhenyu Wang 2311036a4a7dSZhenyu Wang I915_WRITE(GTIMR, 0xffffffff); 2312036a4a7dSZhenyu Wang I915_WRITE(GTIER, 0x0); 2313036a4a7dSZhenyu Wang I915_WRITE(GTIIR, I915_READ(GTIIR)); 2314192aac1fSKeith Packard 2315ab5c608bSBen Widawsky if (HAS_PCH_NOP(dev)) 2316ab5c608bSBen Widawsky return; 2317ab5c608bSBen Widawsky 
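	/*
	 * On PCH-less parts (PCH "NOP") the south display engine registers
	 * do not exist, so the teardown returns early above.  Otherwise the
	 * SDE interrupts are masked and acked below, and CPT/LPT parts also
	 * clear SERR_INT so no stale south error status survives the unload.
	 */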
2318192aac1fSKeith Packard I915_WRITE(SDEIMR, 0xffffffff); 2319192aac1fSKeith Packard I915_WRITE(SDEIER, 0x0); 2320192aac1fSKeith Packard I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 23218664281bSPaulo Zanoni if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 23228664281bSPaulo Zanoni I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2323036a4a7dSZhenyu Wang } 2324036a4a7dSZhenyu Wang 2325c2798b19SChris Wilson static void i8xx_irq_preinstall(struct drm_device * dev) 2326c2798b19SChris Wilson { 2327c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2328c2798b19SChris Wilson int pipe; 2329c2798b19SChris Wilson 2330c2798b19SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2331c2798b19SChris Wilson 2332c2798b19SChris Wilson for_each_pipe(pipe) 2333c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2334c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2335c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2336c2798b19SChris Wilson POSTING_READ16(IER); 2337c2798b19SChris Wilson } 2338c2798b19SChris Wilson 2339c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 2340c2798b19SChris Wilson { 2341c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2342c2798b19SChris Wilson 2343c2798b19SChris Wilson I915_WRITE16(EMR, 2344c2798b19SChris Wilson ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2345c2798b19SChris Wilson 2346c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 2347c2798b19SChris Wilson dev_priv->irq_mask = 2348c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2349c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2350c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2351c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2352c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2353c2798b19SChris Wilson I915_WRITE16(IMR, dev_priv->irq_mask); 2354c2798b19SChris Wilson 2355c2798b19SChris Wilson I915_WRITE16(IER, 2356c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2357c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2358c2798b19SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2359c2798b19SChris Wilson I915_USER_INTERRUPT); 2360c2798b19SChris Wilson POSTING_READ16(IER); 2361c2798b19SChris Wilson 2362c2798b19SChris Wilson return 0; 2363c2798b19SChris Wilson } 2364c2798b19SChris Wilson 236590a72f87SVille Syrjälä /* 236690a72f87SVille Syrjälä * Returns true when a page flip has completed. 236790a72f87SVille Syrjälä */ 236890a72f87SVille Syrjälä static bool i8xx_handle_vblank(struct drm_device *dev, 236990a72f87SVille Syrjälä int pipe, u16 iir) 237090a72f87SVille Syrjälä { 237190a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 237290a72f87SVille Syrjälä u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); 237390a72f87SVille Syrjälä 237490a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 237590a72f87SVille Syrjälä return false; 237690a72f87SVille Syrjälä 237790a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 237890a72f87SVille Syrjälä return false; 237990a72f87SVille Syrjälä 238090a72f87SVille Syrjälä intel_prepare_page_flip(dev, pipe); 238190a72f87SVille Syrjälä 238290a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 238390a72f87SVille Syrjälä * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 238490a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 238590a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 238690a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 238790a72f87SVille Syrjälä */ 238890a72f87SVille Syrjälä if (I915_READ16(ISR) & flip_pending) 238990a72f87SVille Syrjälä return false; 239090a72f87SVille Syrjälä 239190a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 239290a72f87SVille Syrjälä 239390a72f87SVille Syrjälä return true; 239490a72f87SVille Syrjälä } 239590a72f87SVille Syrjälä 2396ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 2397c2798b19SChris Wilson { 2398c2798b19SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2399c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2400c2798b19SChris Wilson u16 iir, new_iir; 2401c2798b19SChris Wilson u32 pipe_stats[2]; 2402c2798b19SChris Wilson unsigned long irqflags; 2403c2798b19SChris Wilson int irq_received; 2404c2798b19SChris Wilson int pipe; 2405c2798b19SChris Wilson u16 flip_mask = 2406c2798b19SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2407c2798b19SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2408c2798b19SChris Wilson 2409c2798b19SChris Wilson atomic_inc(&dev_priv->irq_received); 2410c2798b19SChris Wilson 2411c2798b19SChris Wilson iir = I915_READ16(IIR); 2412c2798b19SChris Wilson if (iir == 0) 2413c2798b19SChris Wilson return IRQ_NONE; 2414c2798b19SChris Wilson 2415c2798b19SChris Wilson while (iir & ~flip_mask) { 2416c2798b19SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2417c2798b19SChris Wilson * have been cleared after the pipestat interrupt was received. 2418c2798b19SChris Wilson * It doesn't set the bit in iir again, but it still produces 2419c2798b19SChris Wilson * interrupts (for non-MSI). 
2420c2798b19SChris Wilson */ 2421c2798b19SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2422c2798b19SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2423c2798b19SChris Wilson i915_handle_error(dev, false); 2424c2798b19SChris Wilson 2425c2798b19SChris Wilson for_each_pipe(pipe) { 2426c2798b19SChris Wilson int reg = PIPESTAT(pipe); 2427c2798b19SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2428c2798b19SChris Wilson 2429c2798b19SChris Wilson /* 2430c2798b19SChris Wilson * Clear the PIPE*STAT regs before the IIR 2431c2798b19SChris Wilson */ 2432c2798b19SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2433c2798b19SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2434c2798b19SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2435c2798b19SChris Wilson pipe_name(pipe)); 2436c2798b19SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2437c2798b19SChris Wilson irq_received = 1; 2438c2798b19SChris Wilson } 2439c2798b19SChris Wilson } 2440c2798b19SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2441c2798b19SChris Wilson 2442c2798b19SChris Wilson I915_WRITE16(IIR, iir & ~flip_mask); 2443c2798b19SChris Wilson new_iir = I915_READ16(IIR); /* Flush posted writes */ 2444c2798b19SChris Wilson 2445d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 2446c2798b19SChris Wilson 2447c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 2448c2798b19SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2449c2798b19SChris Wilson 2450c2798b19SChris Wilson if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 245190a72f87SVille Syrjälä i8xx_handle_vblank(dev, 0, iir)) 245290a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); 2453c2798b19SChris Wilson 2454c2798b19SChris Wilson if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 245590a72f87SVille Syrjälä i8xx_handle_vblank(dev, 1, iir)) 245690a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); 2457c2798b19SChris Wilson 2458c2798b19SChris Wilson iir = new_iir; 2459c2798b19SChris Wilson } 2460c2798b19SChris Wilson 2461c2798b19SChris Wilson return IRQ_HANDLED; 2462c2798b19SChris Wilson } 2463c2798b19SChris Wilson 2464c2798b19SChris Wilson static void i8xx_irq_uninstall(struct drm_device * dev) 2465c2798b19SChris Wilson { 2466c2798b19SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2467c2798b19SChris Wilson int pipe; 2468c2798b19SChris Wilson 2469c2798b19SChris Wilson for_each_pipe(pipe) { 2470c2798b19SChris Wilson /* Clear enable bits; then clear status bits */ 2471c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2472c2798b19SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2473c2798b19SChris Wilson } 2474c2798b19SChris Wilson I915_WRITE16(IMR, 0xffff); 2475c2798b19SChris Wilson I915_WRITE16(IER, 0x0); 2476c2798b19SChris Wilson I915_WRITE16(IIR, I915_READ16(IIR)); 2477c2798b19SChris Wilson } 2478c2798b19SChris Wilson 2479a266c7d5SChris Wilson static void i915_irq_preinstall(struct drm_device * dev) 2480a266c7d5SChris Wilson { 2481a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2482a266c7d5SChris Wilson int pipe; 2483a266c7d5SChris Wilson 2484a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2485a266c7d5SChris Wilson 2486a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2487a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2488a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2489a266c7d5SChris Wilson } 
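	/*
	 * Quiesce the chip before the handler is installed: mask most
	 * sources from the hardware status page via HWSTAM, zero the
	 * per-pipe PIPESTAT enables, mask everything in IMR and disable
	 * it in IER, then do a posting read so the writes have landed.
	 */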
2490a266c7d5SChris Wilson 249100d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xeffe); 2492a266c7d5SChris Wilson for_each_pipe(pipe) 2493a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2494a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2495a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2496a266c7d5SChris Wilson POSTING_READ(IER); 2497a266c7d5SChris Wilson } 2498a266c7d5SChris Wilson 2499a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 2500a266c7d5SChris Wilson { 2501a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 250238bde180SChris Wilson u32 enable_mask; 2503a266c7d5SChris Wilson 250438bde180SChris Wilson I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 250538bde180SChris Wilson 250638bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 250738bde180SChris Wilson dev_priv->irq_mask = 250838bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 250938bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 251038bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 251138bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 251238bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 251338bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 251438bde180SChris Wilson 251538bde180SChris Wilson enable_mask = 251638bde180SChris Wilson I915_ASLE_INTERRUPT | 251738bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 251838bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 251938bde180SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 252038bde180SChris Wilson I915_USER_INTERRUPT; 252138bde180SChris Wilson 2522a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 252320afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 252420afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 252520afbda2SDaniel Vetter 2526a266c7d5SChris Wilson /* Enable in IER... */ 2527a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2528a266c7d5SChris Wilson /* and unmask in IMR */ 2529a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2530a266c7d5SChris Wilson } 2531a266c7d5SChris Wilson 2532a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2533a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2534a266c7d5SChris Wilson POSTING_READ(IER); 2535a266c7d5SChris Wilson 2536f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 253720afbda2SDaniel Vetter 253820afbda2SDaniel Vetter return 0; 253920afbda2SDaniel Vetter } 254020afbda2SDaniel Vetter 254190a72f87SVille Syrjälä /* 254290a72f87SVille Syrjälä * Returns true when a page flip has completed. 
254390a72f87SVille Syrjälä */ 254490a72f87SVille Syrjälä static bool i915_handle_vblank(struct drm_device *dev, 254590a72f87SVille Syrjälä int plane, int pipe, u32 iir) 254690a72f87SVille Syrjälä { 254790a72f87SVille Syrjälä drm_i915_private_t *dev_priv = dev->dev_private; 254890a72f87SVille Syrjälä u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 254990a72f87SVille Syrjälä 255090a72f87SVille Syrjälä if (!drm_handle_vblank(dev, pipe)) 255190a72f87SVille Syrjälä return false; 255290a72f87SVille Syrjälä 255390a72f87SVille Syrjälä if ((iir & flip_pending) == 0) 255490a72f87SVille Syrjälä return false; 255590a72f87SVille Syrjälä 255690a72f87SVille Syrjälä intel_prepare_page_flip(dev, plane); 255790a72f87SVille Syrjälä 255890a72f87SVille Syrjälä /* We detect FlipDone by looking for the change in PendingFlip from '1' 255990a72f87SVille Syrjälä * to '0' on the following vblank, i.e. IIR has the Pendingflip 256090a72f87SVille Syrjälä * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 256190a72f87SVille Syrjälä * the flip is completed (no longer pending). Since this doesn't raise 256290a72f87SVille Syrjälä * an interrupt per se, we watch for the change at vblank. 256390a72f87SVille Syrjälä */ 256490a72f87SVille Syrjälä if (I915_READ(ISR) & flip_pending) 256590a72f87SVille Syrjälä return false; 256690a72f87SVille Syrjälä 256790a72f87SVille Syrjälä intel_finish_page_flip(dev, pipe); 256890a72f87SVille Syrjälä 256990a72f87SVille Syrjälä return true; 257090a72f87SVille Syrjälä } 257190a72f87SVille Syrjälä 2572ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 2573a266c7d5SChris Wilson { 2574a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2575a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 25768291ee90SChris Wilson u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2577a266c7d5SChris Wilson unsigned long irqflags; 257838bde180SChris Wilson u32 flip_mask = 257938bde180SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 258038bde180SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 258138bde180SChris Wilson int pipe, ret = IRQ_NONE; 2582a266c7d5SChris Wilson 2583a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2584a266c7d5SChris Wilson 2585a266c7d5SChris Wilson iir = I915_READ(IIR); 258638bde180SChris Wilson do { 258738bde180SChris Wilson bool irq_received = (iir & ~flip_mask) != 0; 25888291ee90SChris Wilson bool blc_event = false; 2589a266c7d5SChris Wilson 2590a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2591a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2592a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2593a266c7d5SChris Wilson * interrupts (for non-MSI). 
2594a266c7d5SChris Wilson */ 2595a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2596a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2597a266c7d5SChris Wilson i915_handle_error(dev, false); 2598a266c7d5SChris Wilson 2599a266c7d5SChris Wilson for_each_pipe(pipe) { 2600a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2601a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2602a266c7d5SChris Wilson 260338bde180SChris Wilson /* Clear the PIPE*STAT regs before the IIR */ 2604a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2605a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2606a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2607a266c7d5SChris Wilson pipe_name(pipe)); 2608a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 260938bde180SChris Wilson irq_received = true; 2610a266c7d5SChris Wilson } 2611a266c7d5SChris Wilson } 2612a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2613a266c7d5SChris Wilson 2614a266c7d5SChris Wilson if (!irq_received) 2615a266c7d5SChris Wilson break; 2616a266c7d5SChris Wilson 2617a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 2618a266c7d5SChris Wilson if ((I915_HAS_HOTPLUG(dev)) && 2619a266c7d5SChris Wilson (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2620a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2621b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2622a266c7d5SChris Wilson 2623a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2624a266c7d5SChris Wilson hotplug_status); 262591d131d2SDaniel Vetter 262610a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 262791d131d2SDaniel Vetter 2628a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 262938bde180SChris Wilson POSTING_READ(PORT_HOTPLUG_STAT); 2630a266c7d5SChris Wilson } 2631a266c7d5SChris Wilson 263238bde180SChris Wilson I915_WRITE(IIR, iir & ~flip_mask); 2633a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 2634a266c7d5SChris Wilson 2635a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 2636a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2637a266c7d5SChris Wilson 2638a266c7d5SChris Wilson for_each_pipe(pipe) { 263938bde180SChris Wilson int plane = pipe; 264038bde180SChris Wilson if (IS_MOBILE(dev)) 264138bde180SChris Wilson plane = !plane; 26425e2032d4SVille Syrjälä 264390a72f87SVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 264490a72f87SVille Syrjälä i915_handle_vblank(dev, plane, pipe, iir)) 264590a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 2646a266c7d5SChris Wilson 2647a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2648a266c7d5SChris Wilson blc_event = true; 2649a266c7d5SChris Wilson } 2650a266c7d5SChris Wilson 2651a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2652a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 2653a266c7d5SChris Wilson 2654a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 2655a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 2656a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 2657a266c7d5SChris Wilson * we would never get another interrupt. 
2658a266c7d5SChris Wilson * 2659a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 2660a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 2661a266c7d5SChris Wilson * another one. 2662a266c7d5SChris Wilson * 2663a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 2664a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 2665a266c7d5SChris Wilson * the posting read. This should be rare enough to never 2666a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 2667a266c7d5SChris Wilson * stray interrupts. 2668a266c7d5SChris Wilson */ 266938bde180SChris Wilson ret = IRQ_HANDLED; 2670a266c7d5SChris Wilson iir = new_iir; 267138bde180SChris Wilson } while (iir & ~flip_mask); 2672a266c7d5SChris Wilson 2673d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 26748291ee90SChris Wilson 2675a266c7d5SChris Wilson return ret; 2676a266c7d5SChris Wilson } 2677a266c7d5SChris Wilson 2678a266c7d5SChris Wilson static void i915_irq_uninstall(struct drm_device * dev) 2679a266c7d5SChris Wilson { 2680a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2681a266c7d5SChris Wilson int pipe; 2682a266c7d5SChris Wilson 2683ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2684ac4c16c5SEgbert Eich 2685a266c7d5SChris Wilson if (I915_HAS_HOTPLUG(dev)) { 2686a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2687a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2688a266c7d5SChris Wilson } 2689a266c7d5SChris Wilson 269000d98ebdSChris Wilson I915_WRITE16(HWSTAM, 0xffff); 269155b39755SChris Wilson for_each_pipe(pipe) { 269255b39755SChris Wilson /* Clear enable bits; then clear status bits */ 2693a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 269455b39755SChris Wilson I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 269555b39755SChris Wilson } 2696a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2697a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2698a266c7d5SChris Wilson 2699a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 2700a266c7d5SChris Wilson } 2701a266c7d5SChris Wilson 2702a266c7d5SChris Wilson static void i965_irq_preinstall(struct drm_device * dev) 2703a266c7d5SChris Wilson { 2704a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2705a266c7d5SChris Wilson int pipe; 2706a266c7d5SChris Wilson 2707a266c7d5SChris Wilson atomic_set(&dev_priv->irq_received, 0); 2708a266c7d5SChris Wilson 2709a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2710a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2711a266c7d5SChris Wilson 2712a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xeffe); 2713a266c7d5SChris Wilson for_each_pipe(pipe) 2714a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2715a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2716a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2717a266c7d5SChris Wilson POSTING_READ(IER); 2718a266c7d5SChris Wilson } 2719a266c7d5SChris Wilson 2720a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 2721a266c7d5SChris Wilson { 2722a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2723bbba0a97SChris Wilson u32 enable_mask; 2724a266c7d5SChris Wilson u32 error_mask; 2725b79480baSDaniel Vetter unsigned long irqflags; 2726a266c7d5SChris Wilson 2727a266c7d5SChris Wilson /* Unmask the interrupts 
that we always want on. */ 2728bbba0a97SChris Wilson dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2729adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 2730bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2731bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2732bbba0a97SChris Wilson I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2733bbba0a97SChris Wilson I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2734bbba0a97SChris Wilson I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2735bbba0a97SChris Wilson 2736bbba0a97SChris Wilson enable_mask = ~dev_priv->irq_mask; 273721ad8330SVille Syrjälä enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 273821ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 2739bbba0a97SChris Wilson enable_mask |= I915_USER_INTERRUPT; 2740bbba0a97SChris Wilson 2741bbba0a97SChris Wilson if (IS_G4X(dev)) 2742bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 2743a266c7d5SChris Wilson 2744b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 2745b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. */ 2746b79480baSDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2747515ac2bbSDaniel Vetter i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2748b79480baSDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2749a266c7d5SChris Wilson 2750a266c7d5SChris Wilson /* 2751a266c7d5SChris Wilson * Enable some error detection, note the instruction error mask 2752a266c7d5SChris Wilson * bit is reserved, so we leave it masked. 2753a266c7d5SChris Wilson */ 2754a266c7d5SChris Wilson if (IS_G4X(dev)) { 2755a266c7d5SChris Wilson error_mask = ~(GM45_ERROR_PAGE_TABLE | 2756a266c7d5SChris Wilson GM45_ERROR_MEM_PRIV | 2757a266c7d5SChris Wilson GM45_ERROR_CP_PRIV | 2758a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2759a266c7d5SChris Wilson } else { 2760a266c7d5SChris Wilson error_mask = ~(I915_ERROR_PAGE_TABLE | 2761a266c7d5SChris Wilson I915_ERROR_MEMORY_REFRESH); 2762a266c7d5SChris Wilson } 2763a266c7d5SChris Wilson I915_WRITE(EMR, error_mask); 2764a266c7d5SChris Wilson 2765a266c7d5SChris Wilson I915_WRITE(IMR, dev_priv->irq_mask); 2766a266c7d5SChris Wilson I915_WRITE(IER, enable_mask); 2767a266c7d5SChris Wilson POSTING_READ(IER); 2768a266c7d5SChris Wilson 276920afbda2SDaniel Vetter I915_WRITE(PORT_HOTPLUG_EN, 0); 277020afbda2SDaniel Vetter POSTING_READ(PORT_HOTPLUG_EN); 277120afbda2SDaniel Vetter 2772f49e38ddSJani Nikula i915_enable_asle_pipestat(dev); 277320afbda2SDaniel Vetter 277420afbda2SDaniel Vetter return 0; 277520afbda2SDaniel Vetter } 277620afbda2SDaniel Vetter 2777bac56d5bSEgbert Eich static void i915_hpd_irq_setup(struct drm_device *dev) 277820afbda2SDaniel Vetter { 277920afbda2SDaniel Vetter drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2780e5868a31SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 2781cd569aedSEgbert Eich struct intel_encoder *intel_encoder; 278220afbda2SDaniel Vetter u32 hotplug_en; 278320afbda2SDaniel Vetter 2784b5ea2d56SDaniel Vetter assert_spin_locked(&dev_priv->irq_lock); 2785b5ea2d56SDaniel Vetter 2786bac56d5bSEgbert Eich if (I915_HAS_HOTPLUG(dev)) { 2787bac56d5bSEgbert Eich hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2788bac56d5bSEgbert Eich hotplug_en &= ~HOTPLUG_INT_EN_MASK; 2789adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 2790e5868a31SEgbert Eich /* enable bits are the same for all generations */ 2791cd569aedSEgbert Eich 
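		/*
		 * Only pins whose storm-detection state is HPD_ENABLED get
		 * their enable bit set here; pins that were shut off after a
		 * hotplug interrupt storm stay disabled until the reenable
		 * timer (i915_reenable_hotplug_timer_func) switches them
		 * back from polling to interrupt-driven detection.
		 */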
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2792cd569aedSEgbert Eich if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2793cd569aedSEgbert Eich hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 2794a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 2795a266c7d5SChris Wilson to generate a spurious hotplug event about three 2796a266c7d5SChris Wilson seconds later. So just do it once. 2797a266c7d5SChris Wilson */ 2798a266c7d5SChris Wilson if (IS_G4X(dev)) 2799a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 280085fc95baSDaniel Vetter hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 2801a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2802a266c7d5SChris Wilson 2803a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 2804a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2805a266c7d5SChris Wilson } 2806bac56d5bSEgbert Eich } 2807a266c7d5SChris Wilson 2808ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 2809a266c7d5SChris Wilson { 2810a266c7d5SChris Wilson struct drm_device *dev = (struct drm_device *) arg; 2811a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2812a266c7d5SChris Wilson u32 iir, new_iir; 2813a266c7d5SChris Wilson u32 pipe_stats[I915_MAX_PIPES]; 2814a266c7d5SChris Wilson unsigned long irqflags; 2815a266c7d5SChris Wilson int irq_received; 2816a266c7d5SChris Wilson int ret = IRQ_NONE, pipe; 281721ad8330SVille Syrjälä u32 flip_mask = 281821ad8330SVille Syrjälä I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 281921ad8330SVille Syrjälä I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2820a266c7d5SChris Wilson 2821a266c7d5SChris Wilson atomic_inc(&dev_priv->irq_received); 2822a266c7d5SChris Wilson 2823a266c7d5SChris Wilson iir = I915_READ(IIR); 2824a266c7d5SChris Wilson 2825a266c7d5SChris Wilson for (;;) { 28262c8ba29fSChris Wilson bool blc_event = false; 28272c8ba29fSChris Wilson 282821ad8330SVille Syrjälä irq_received = (iir & ~flip_mask) != 0; 2829a266c7d5SChris Wilson 2830a266c7d5SChris Wilson /* Can't rely on pipestat interrupt bit in iir as it might 2831a266c7d5SChris Wilson * have been cleared after the pipestat interrupt was received. 2832a266c7d5SChris Wilson * It doesn't set the bit in iir again, but it still produces 2833a266c7d5SChris Wilson * interrupts (for non-MSI). 
2834a266c7d5SChris Wilson */ 2835a266c7d5SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2836a266c7d5SChris Wilson if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2837a266c7d5SChris Wilson i915_handle_error(dev, false); 2838a266c7d5SChris Wilson 2839a266c7d5SChris Wilson for_each_pipe(pipe) { 2840a266c7d5SChris Wilson int reg = PIPESTAT(pipe); 2841a266c7d5SChris Wilson pipe_stats[pipe] = I915_READ(reg); 2842a266c7d5SChris Wilson 2843a266c7d5SChris Wilson /* 2844a266c7d5SChris Wilson * Clear the PIPE*STAT regs before the IIR 2845a266c7d5SChris Wilson */ 2846a266c7d5SChris Wilson if (pipe_stats[pipe] & 0x8000ffff) { 2847a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2848a266c7d5SChris Wilson DRM_DEBUG_DRIVER("pipe %c underrun\n", 2849a266c7d5SChris Wilson pipe_name(pipe)); 2850a266c7d5SChris Wilson I915_WRITE(reg, pipe_stats[pipe]); 2851a266c7d5SChris Wilson irq_received = 1; 2852a266c7d5SChris Wilson } 2853a266c7d5SChris Wilson } 2854a266c7d5SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2855a266c7d5SChris Wilson 2856a266c7d5SChris Wilson if (!irq_received) 2857a266c7d5SChris Wilson break; 2858a266c7d5SChris Wilson 2859a266c7d5SChris Wilson ret = IRQ_HANDLED; 2860a266c7d5SChris Wilson 2861a266c7d5SChris Wilson /* Consume port. Then clear IIR or we'll miss events */ 2862adca4730SChris Wilson if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2863a266c7d5SChris Wilson u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2864b543fb04SEgbert Eich u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 2865b543fb04SEgbert Eich HOTPLUG_INT_STATUS_G4X : 28664f7fd709SDaniel Vetter HOTPLUG_INT_STATUS_I915); 2867a266c7d5SChris Wilson 2868a266c7d5SChris Wilson DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2869a266c7d5SChris Wilson hotplug_status); 287091d131d2SDaniel Vetter 287110a504deSDaniel Vetter intel_hpd_irq_handler(dev, hotplug_trigger, 287210a504deSDaniel Vetter IS_G4X(dev) ? 
hpd_status_gen4 : hpd_status_i915); 287391d131d2SDaniel Vetter 2874a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2875a266c7d5SChris Wilson I915_READ(PORT_HOTPLUG_STAT); 2876a266c7d5SChris Wilson } 2877a266c7d5SChris Wilson 287821ad8330SVille Syrjälä I915_WRITE(IIR, iir & ~flip_mask); 2879a266c7d5SChris Wilson new_iir = I915_READ(IIR); /* Flush posted writes */ 2880a266c7d5SChris Wilson 2881a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 2882a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[RCS]); 2883a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 2884a266c7d5SChris Wilson notify_ring(dev, &dev_priv->ring[VCS]); 2885a266c7d5SChris Wilson 2886a266c7d5SChris Wilson for_each_pipe(pipe) { 28872c8ba29fSChris Wilson if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 288890a72f87SVille Syrjälä i915_handle_vblank(dev, pipe, pipe, iir)) 288990a72f87SVille Syrjälä flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 2890a266c7d5SChris Wilson 2891a266c7d5SChris Wilson if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2892a266c7d5SChris Wilson blc_event = true; 2893a266c7d5SChris Wilson } 2894a266c7d5SChris Wilson 2895a266c7d5SChris Wilson 2896a266c7d5SChris Wilson if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2897a266c7d5SChris Wilson intel_opregion_asle_intr(dev); 2898a266c7d5SChris Wilson 2899515ac2bbSDaniel Vetter if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2900515ac2bbSDaniel Vetter gmbus_irq_handler(dev); 2901515ac2bbSDaniel Vetter 2902a266c7d5SChris Wilson /* With MSI, interrupts are only generated when iir 2903a266c7d5SChris Wilson * transitions from zero to nonzero. If another bit got 2904a266c7d5SChris Wilson * set while we were handling the existing iir bits, then 2905a266c7d5SChris Wilson * we would never get another interrupt. 2906a266c7d5SChris Wilson * 2907a266c7d5SChris Wilson * This is fine on non-MSI as well, as if we hit this path 2908a266c7d5SChris Wilson * we avoid exiting the interrupt handler only to generate 2909a266c7d5SChris Wilson * another one. 2910a266c7d5SChris Wilson * 2911a266c7d5SChris Wilson * Note that for MSI this could cause a stray interrupt report 2912a266c7d5SChris Wilson * if an interrupt landed in the time between writing IIR and 2913a266c7d5SChris Wilson * the posting read. This should be rare enough to never 2914a266c7d5SChris Wilson * trigger the 99% of 100,000 interrupts test for disabling 2915a266c7d5SChris Wilson * stray interrupts. 
2916a266c7d5SChris Wilson */ 2917a266c7d5SChris Wilson iir = new_iir; 2918a266c7d5SChris Wilson } 2919a266c7d5SChris Wilson 2920d05c617eSDaniel Vetter i915_update_dri1_breadcrumb(dev); 29212c8ba29fSChris Wilson 2922a266c7d5SChris Wilson return ret; 2923a266c7d5SChris Wilson } 2924a266c7d5SChris Wilson 2925a266c7d5SChris Wilson static void i965_irq_uninstall(struct drm_device * dev) 2926a266c7d5SChris Wilson { 2927a266c7d5SChris Wilson drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2928a266c7d5SChris Wilson int pipe; 2929a266c7d5SChris Wilson 2930a266c7d5SChris Wilson if (!dev_priv) 2931a266c7d5SChris Wilson return; 2932a266c7d5SChris Wilson 2933ac4c16c5SEgbert Eich del_timer_sync(&dev_priv->hotplug_reenable_timer); 2934ac4c16c5SEgbert Eich 2935a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_EN, 0); 2936a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2937a266c7d5SChris Wilson 2938a266c7d5SChris Wilson I915_WRITE(HWSTAM, 0xffffffff); 2939a266c7d5SChris Wilson for_each_pipe(pipe) 2940a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 0); 2941a266c7d5SChris Wilson I915_WRITE(IMR, 0xffffffff); 2942a266c7d5SChris Wilson I915_WRITE(IER, 0x0); 2943a266c7d5SChris Wilson 2944a266c7d5SChris Wilson for_each_pipe(pipe) 2945a266c7d5SChris Wilson I915_WRITE(PIPESTAT(pipe), 2946a266c7d5SChris Wilson I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 2947a266c7d5SChris Wilson I915_WRITE(IIR, I915_READ(IIR)); 2948a266c7d5SChris Wilson } 2949a266c7d5SChris Wilson 2950ac4c16c5SEgbert Eich static void i915_reenable_hotplug_timer_func(unsigned long data) 2951ac4c16c5SEgbert Eich { 2952ac4c16c5SEgbert Eich drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 2953ac4c16c5SEgbert Eich struct drm_device *dev = dev_priv->dev; 2954ac4c16c5SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 2955ac4c16c5SEgbert Eich unsigned long irqflags; 2956ac4c16c5SEgbert Eich int i; 2957ac4c16c5SEgbert Eich 2958ac4c16c5SEgbert Eich spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2959ac4c16c5SEgbert Eich for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 2960ac4c16c5SEgbert Eich struct drm_connector *connector; 2961ac4c16c5SEgbert Eich 2962ac4c16c5SEgbert Eich if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 2963ac4c16c5SEgbert Eich continue; 2964ac4c16c5SEgbert Eich 2965ac4c16c5SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 2966ac4c16c5SEgbert Eich 2967ac4c16c5SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 2968ac4c16c5SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 2969ac4c16c5SEgbert Eich 2970ac4c16c5SEgbert Eich if (intel_connector->encoder->hpd_pin == i) { 2971ac4c16c5SEgbert Eich if (connector->polled != intel_connector->polled) 2972ac4c16c5SEgbert Eich DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 2973ac4c16c5SEgbert Eich drm_get_connector_name(connector)); 2974ac4c16c5SEgbert Eich connector->polled = intel_connector->polled; 2975ac4c16c5SEgbert Eich if (!connector->polled) 2976ac4c16c5SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 2977ac4c16c5SEgbert Eich } 2978ac4c16c5SEgbert Eich } 2979ac4c16c5SEgbert Eich } 2980ac4c16c5SEgbert Eich if (dev_priv->display.hpd_irq_setup) 2981ac4c16c5SEgbert Eich dev_priv->display.hpd_irq_setup(dev); 2982ac4c16c5SEgbert Eich spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2983ac4c16c5SEgbert Eich } 2984ac4c16c5SEgbert Eich 2985f71d4af4SJesse Barnes void intel_irq_init(struct drm_device *dev) 2986f71d4af4SJesse 
Barnes { 29878b2e326dSChris Wilson struct drm_i915_private *dev_priv = dev->dev_private; 29888b2e326dSChris Wilson 29898b2e326dSChris Wilson INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 299099584db3SDaniel Vetter INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 2991c6a828d3SDaniel Vetter INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2992a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 29938b2e326dSChris Wilson 299499584db3SDaniel Vetter setup_timer(&dev_priv->gpu_error.hangcheck_timer, 299599584db3SDaniel Vetter i915_hangcheck_elapsed, 299661bac78eSDaniel Vetter (unsigned long) dev); 2997ac4c16c5SEgbert Eich setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 2998ac4c16c5SEgbert Eich (unsigned long) dev_priv); 299961bac78eSDaniel Vetter 300097a19a24STomas Janousek pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 30019ee32feaSDaniel Vetter 3002f71d4af4SJesse Barnes dev->driver->get_vblank_counter = i915_get_vblank_counter; 3003f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 30047d4e146fSEugeni Dodonov if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3005f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3006f71d4af4SJesse Barnes dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3007f71d4af4SJesse Barnes } 3008f71d4af4SJesse Barnes 3009c3613de9SKeith Packard if (drm_core_check_feature(dev, DRIVER_MODESET)) 3010f71d4af4SJesse Barnes dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3011c3613de9SKeith Packard else 3012c3613de9SKeith Packard dev->driver->get_vblank_timestamp = NULL; 3013f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3014f71d4af4SJesse Barnes 30157e231dbeSJesse Barnes if (IS_VALLEYVIEW(dev)) { 30167e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 30177e231dbeSJesse Barnes dev->driver->irq_preinstall = valleyview_irq_preinstall; 30187e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 30197e231dbeSJesse Barnes dev->driver->irq_uninstall = valleyview_irq_uninstall; 30207e231dbeSJesse Barnes dev->driver->enable_vblank = valleyview_enable_vblank; 30217e231dbeSJesse Barnes dev->driver->disable_vblank = valleyview_disable_vblank; 3022fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3023f71d4af4SJesse Barnes } else if (HAS_PCH_SPLIT(dev)) { 3024f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 3025f71d4af4SJesse Barnes dev->driver->irq_preinstall = ironlake_irq_preinstall; 3026f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 3027f71d4af4SJesse Barnes dev->driver->irq_uninstall = ironlake_irq_uninstall; 3028f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 3029f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 303082a28bcfSDaniel Vetter dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3031f71d4af4SJesse Barnes } else { 3032c2798b19SChris Wilson if (INTEL_INFO(dev)->gen == 2) { 3033c2798b19SChris Wilson dev->driver->irq_preinstall = i8xx_irq_preinstall; 3034c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 3035c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 3036c2798b19SChris Wilson dev->driver->irq_uninstall = i8xx_irq_uninstall; 3037a266c7d5SChris Wilson } else if (INTEL_INFO(dev)->gen == 3) { 
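			/* gen3: legacy i915 interrupt hooks, with hotplug
			 * support wired up through i915_hpd_irq_setup. */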
3038a266c7d5SChris Wilson dev->driver->irq_preinstall = i915_irq_preinstall; 3039a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 3040a266c7d5SChris Wilson dev->driver->irq_uninstall = i915_irq_uninstall; 3041a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 304220afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3043c2798b19SChris Wilson } else { 3044a266c7d5SChris Wilson dev->driver->irq_preinstall = i965_irq_preinstall; 3045a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 3046a266c7d5SChris Wilson dev->driver->irq_uninstall = i965_irq_uninstall; 3047a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 3048bac56d5bSEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3049c2798b19SChris Wilson } 3050f71d4af4SJesse Barnes dev->driver->enable_vblank = i915_enable_vblank; 3051f71d4af4SJesse Barnes dev->driver->disable_vblank = i915_disable_vblank; 3052f71d4af4SJesse Barnes } 3053f71d4af4SJesse Barnes } 305420afbda2SDaniel Vetter 305520afbda2SDaniel Vetter void intel_hpd_init(struct drm_device *dev) 305620afbda2SDaniel Vetter { 305720afbda2SDaniel Vetter struct drm_i915_private *dev_priv = dev->dev_private; 3058821450c6SEgbert Eich struct drm_mode_config *mode_config = &dev->mode_config; 3059821450c6SEgbert Eich struct drm_connector *connector; 3060b5ea2d56SDaniel Vetter unsigned long irqflags; 3061821450c6SEgbert Eich int i; 306220afbda2SDaniel Vetter 3063821450c6SEgbert Eich for (i = 1; i < HPD_NUM_PINS; i++) { 3064821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_cnt = 0; 3065821450c6SEgbert Eich dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3066821450c6SEgbert Eich } 3067821450c6SEgbert Eich list_for_each_entry(connector, &mode_config->connector_list, head) { 3068821450c6SEgbert Eich struct intel_connector *intel_connector = to_intel_connector(connector); 3069821450c6SEgbert Eich connector->polled = intel_connector->polled; 3070821450c6SEgbert Eich if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3071821450c6SEgbert Eich connector->polled = DRM_CONNECTOR_POLL_HPD; 3072821450c6SEgbert Eich } 3073b5ea2d56SDaniel Vetter 3074b5ea2d56SDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 3075b5ea2d56SDaniel Vetter * just to make the assert_spin_locked checks happy. */ 3076b5ea2d56SDaniel Vetter spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 307720afbda2SDaniel Vetter if (dev_priv->display.hpd_irq_setup) 307820afbda2SDaniel Vetter dev_priv->display.hpd_irq_setup(dev); 3079b5ea2d56SDaniel Vetter spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 308020afbda2SDaniel Vetter } 3081
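/*
 * Illustrative sketch only, not part of the driver: the MSI-safe loop shape
 * shared by i8xx_irq_handler(), i915_irq_handler() and i965_irq_handler()
 * above, simplified (it does not model the per-flip adjustment of flip_mask
 * or the PIPESTAT capture done under the irq_lock).  The example_* names are
 * hypothetical stand-ins for the I915_READ(IIR)/I915_WRITE(IIR, ...) accesses
 * and the per-chip service code, and the sketch assumes the types already
 * available in this file (u32, irqreturn_t).  The point is the ordering:
 * ack the bits being serviced, re-read IIR before servicing, and loop on the
 * re-read value so an edge that arrived while servicing is not lost, since
 * with MSI a new message is only generated when IIR goes zero -> non-zero.
 */
typedef u32 (*example_read_iir_t)(void *ctx);
typedef void (*example_write_iir_t)(void *ctx, u32 bits);
typedef void (*example_service_t)(void *ctx, u32 iir);

static irqreturn_t example_iir_loop(void *ctx, u32 flip_mask,
				    example_read_iir_t read_iir,
				    example_write_iir_t write_iir,
				    example_service_t service)
{
	irqreturn_t ret = IRQ_NONE;
	u32 iir, new_iir;

	iir = read_iir(ctx);
	while (iir & ~flip_mask) {
		/* Ack first; IIR must drop to zero for MSI to fire again. */
		write_iir(ctx, iir & ~flip_mask);
		new_iir = read_iir(ctx);	/* flush posted write */

		service(ctx, iir);

		ret = IRQ_HANDLED;
		iir = new_iir;
	}

	return ret;
}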