/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drm_irq.h>
#include <drm/drm_drv.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)
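
/*
 * Illustrative expansion only (the callers live elsewhere in this file):
 * for the Ironlake display engine registers, GEN3_IRQ_RESET(DE) boils down
 * to roughly
 *
 *	I915_WRITE(DEIMR, 0xffffffff);	// mask every source
 *	I915_WRITE(DEIER, 0);		// disable every source
 *	I915_WRITE(DEIIR, 0xffffffff);	// ack pending bits...
 *	I915_WRITE(DEIIR, 0xffffffff);	// ...twice, since IIR can queue two events
 *
 * with a POSTING_READ() after the IMR and IIR writes to flush the posted
 * MMIO writes before moving on.
 */
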
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that read-modify-write cycles don't
 * interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
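
/*
 * Illustrative usage sketch only (no such call site appears in this chunk):
 * a caller that wants to flip a single HPD enable bit passes that bit in
 * @mask and either the same bit or 0 in @bits, e.g.
 *
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *				      PORTB_HOTPLUG_INT_EN);		// enable
 *	i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0);	// disable
 *
 * Only the bits covered by @mask are touched in PORT_HOTPLUG_EN; everything
 * else keeps its current value.
 */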

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
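
/*
 * Worked example for the ilk_update_*_irq() helpers above, with made-up
 * masks: interrupt_mask = 0b1100 selects which IMR bits to touch and
 * enabled_irq_mask = 0b0100 says which of those should end up enabled.
 *
 *	new_val &= ~0b1100;		// forget the old state of both bits
 *	new_val |= ~0b0100 & 0b1100;	// == 0b1000, re-mask only bit 3
 *
 * A set bit in IMR masks (disables) that interrupt source, so bit 2 ends up
 * enabled, bit 3 stays disabled, and bits outside interrupt_mask keep their
 * previous state.
 */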

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}
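
/*
 * Note on the RPS/GuC disable paths above (descriptive, not from the
 * original comments): the enable/mask bits are cleared under irq_lock
 * first, then synchronize_irq() waits out any handler that is already
 * running, and only afterwards is the stale IIR state reset (and, for RPS,
 * the worker cancelled). The idea is that no new events can be raised,
 * in-flight handlers have finished, and whatever state is left over can
 * then be safely discarded.
 */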

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
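
/*
 * Layout note for the PIPESTAT helpers above (descriptive, not from the
 * original comments): the status bits live in the low 16 bits of PIPESTAT
 * and their enable bits sit 16 positions higher, which is why
 * i915_pipestat_enable_mask() starts from status_mask << 16. The helpers
 * write back enable_mask | status_mask, so the selected status bits, which
 * are cleared by writing 1 to them, are acked at the same time the enables
 * are updated.
 */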

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
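
/*
 * Descriptive note for the cooked vblank counter above: the hardware frame
 * counter in high1/low only increments at the start of vertical active,
 * which is after the start-of-vblank point we actually want to count from.
 * Between vblank start and that increment the raw counter is one behind, so
 * the (pixel >= vbl_start) term (with vbl_start already converted to a pixel
 * count at the start of hsync) supplies the missing +1.
 */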

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, pipe
 * scanline register will not work to get the scanline,
 * since the timings are driven from the PORT or issues
 * with scanline register updates.
 * This function will use Framestamp and current
 * timestamp registers to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				       clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
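
/*
 * Illustrative arithmetic for the conversion above, assuming the timestamp
 * delta is in microseconds and mode->crtc_clock is in kHz (the units the
 * formula implies): pixels = delta_us * clock_khz / 1000 and lines =
 * pixels / htotal. For a 1920x1080@60 mode with htotal = 2200 and
 * crtc_clock = 148500, a delta of 8000 us gives
 * 8000 * 148500 / (1000 * 2200) = 540 lines since the last vblank start,
 * which is then clamped and wrapped by vblank_start modulo vtotal.
 */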

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN(dev_priv, 2))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
10883aa18df8SVille Syrjälä */ 10893aa18df8SVille Syrjälä if (position >= vbl_start) 10903aa18df8SVille Syrjälä position -= vbl_end; 10913aa18df8SVille Syrjälä else 10923aa18df8SVille Syrjälä position += vtotal - vbl_end; 10933aa18df8SVille Syrjälä 1094cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 10953aa18df8SVille Syrjälä *vpos = position; 10963aa18df8SVille Syrjälä *hpos = 0; 10973aa18df8SVille Syrjälä } else { 10980af7e4dfSMario Kleiner *vpos = position / htotal; 10990af7e4dfSMario Kleiner *hpos = position - (*vpos * htotal); 11000af7e4dfSMario Kleiner } 11010af7e4dfSMario Kleiner 11021bf6ad62SDaniel Vetter return true; 11030af7e4dfSMario Kleiner } 11040af7e4dfSMario Kleiner 1105a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc) 1106a225f079SVille Syrjälä { 1107fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1108a225f079SVille Syrjälä unsigned long irqflags; 1109a225f079SVille Syrjälä int position; 1110a225f079SVille Syrjälä 1111a225f079SVille Syrjälä spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1112a225f079SVille Syrjälä position = __intel_get_crtc_scanline(crtc); 1113a225f079SVille Syrjälä spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1114a225f079SVille Syrjälä 1115a225f079SVille Syrjälä return position; 1116a225f079SVille Syrjälä } 1117a225f079SVille Syrjälä 111891d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1119f97108d1SJesse Barnes { 1120b5b72e89SMatthew Garrett u32 busy_up, busy_down, max_avg, min_avg; 11219270388eSDaniel Vetter u8 new_delay; 11229270388eSDaniel Vetter 1123d0ecd7e2SDaniel Vetter spin_lock(&mchdev_lock); 1124f97108d1SJesse Barnes 112573edd18fSDaniel Vetter I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 112673edd18fSDaniel Vetter 112720e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay; 11289270388eSDaniel Vetter 11297648fa99SJesse Barnes I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1130b5b72e89SMatthew Garrett busy_up = I915_READ(RCPREVBSYTUPAVG); 1131b5b72e89SMatthew Garrett busy_down = I915_READ(RCPREVBSYTDNAVG); 1132f97108d1SJesse Barnes max_avg = I915_READ(RCBMAXAVG); 1133f97108d1SJesse Barnes min_avg = I915_READ(RCBMINAVG); 1134f97108d1SJesse Barnes 1135f97108d1SJesse Barnes /* Handle RCS change request from hw */ 1136b5b72e89SMatthew Garrett if (busy_up > max_avg) { 113720e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 113820e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay - 1; 113920e4d407SDaniel Vetter if (new_delay < dev_priv->ips.max_delay) 114020e4d407SDaniel Vetter new_delay = dev_priv->ips.max_delay; 1141b5b72e89SMatthew Garrett } else if (busy_down < min_avg) { 114220e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 114320e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay + 1; 114420e4d407SDaniel Vetter if (new_delay > dev_priv->ips.min_delay) 114520e4d407SDaniel Vetter new_delay = dev_priv->ips.min_delay; 1146f97108d1SJesse Barnes } 1147f97108d1SJesse Barnes 114891d14251STvrtko Ursulin if (ironlake_set_drps(dev_priv, new_delay)) 114920e4d407SDaniel Vetter dev_priv->ips.cur_delay = new_delay; 1150f97108d1SJesse Barnes 1151d0ecd7e2SDaniel Vetter spin_unlock(&mchdev_lock); 11529270388eSDaniel Vetter 1153f97108d1SJesse Barnes return; 1154f97108d1SJesse Barnes } 1155f97108d1SJesse Barnes 11560bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine) 1157549f7365SChris 
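/*
 * Editorial sketch, not part of the driver: the position bookkeeping that
 * the comments in i915_get_crtc_scanoutpos() above describe, condensed into
 * one self-contained helper with made-up timings. It assumes the interlace
 * halving of vtotal/vbl_start/vbl_end has already been applied, and mirrors
 * the clamp to the shorter field, the (htotal - hsync_start) shift that
 * aligns the vblank interrupt with the leading edge of a line, and the final
 * conversion that makes the value negative while in vblank, counting up
 * towards zero at vbl_end.
 */
static int example_vblank_relative_scanline(int pixel_count,
					    int htotal, int vtotal,
					    int hsync_start,
					    int vbl_start, int vbl_end)
{
	int position = pixel_count;

	/* work in pixel units, as the code above does */
	vbl_start *= htotal;
	vbl_end *= htotal;
	vtotal *= htotal;

	/* clamp to the shorter field so the position never jumps backwards */
	if (position >= vtotal)
		position = vtotal - 1;

	/* shift so that vblank starts at the leading edge of a line */
	position = (position + htotal - hsync_start) % vtotal;

	/* negative while in vblank, positive once scanout has resumed */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	return position / htotal; /* the remainder would be the hpos */
}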
Wilson { 11583f88325cSChris Wilson const u32 seqno = intel_engine_get_seqno(engine); 1159e61e0f51SChris Wilson struct i915_request *rq = NULL; 11603f88325cSChris Wilson struct task_struct *tsk = NULL; 116156299fb7SChris Wilson struct intel_wait *wait; 1162dffabc8fSTvrtko Ursulin 11633f88325cSChris Wilson if (unlikely(!engine->breadcrumbs.irq_armed)) 1164bcbd5c33SChris Wilson return; 1165bcbd5c33SChris Wilson 11663f88325cSChris Wilson rcu_read_lock(); 116756299fb7SChris Wilson 116861d3dc70SChris Wilson spin_lock(&engine->breadcrumbs.irq_lock); 116961d3dc70SChris Wilson wait = engine->breadcrumbs.irq_wait; 117056299fb7SChris Wilson if (wait) { 11713f88325cSChris Wilson /* 11723f88325cSChris Wilson * We use a callback from the dma-fence to submit 117356299fb7SChris Wilson * requests after waiting on our own requests. To 117456299fb7SChris Wilson * ensure minimum delay in queuing the next request to 117556299fb7SChris Wilson * hardware, signal the fence now rather than wait for 117656299fb7SChris Wilson * the signaler to be woken up. We still wake up the 117756299fb7SChris Wilson * waiter in order to handle the irq-seqno coherency 117856299fb7SChris Wilson * issues (we may receive the interrupt before the 117956299fb7SChris Wilson * seqno is written, see __i915_request_irq_complete()) 118056299fb7SChris Wilson * and to handle coalescing of multiple seqno updates 118156299fb7SChris Wilson * and many waiters. 118256299fb7SChris Wilson */ 11833f88325cSChris Wilson if (i915_seqno_passed(seqno, wait->seqno)) { 1184e61e0f51SChris Wilson struct i915_request *waiter = wait->request; 1185de4d2106SChris Wilson 1186e3be4079SChris Wilson if (waiter && 1187e3be4079SChris Wilson !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1188de4d2106SChris Wilson &waiter->fence.flags) && 1189de4d2106SChris Wilson intel_wait_check_request(wait, waiter)) 1190e61e0f51SChris Wilson rq = i915_request_get(waiter); 119156299fb7SChris Wilson 11923f88325cSChris Wilson tsk = wait->tsk; 11933f88325cSChris Wilson } 119478796877SChris Wilson 119578796877SChris Wilson engine->breadcrumbs.irq_count++; 119667b807a8SChris Wilson } else { 1197bcbd5c33SChris Wilson if (engine->breadcrumbs.irq_armed) 119867b807a8SChris Wilson __intel_engine_disarm_breadcrumbs(engine); 119956299fb7SChris Wilson } 120061d3dc70SChris Wilson spin_unlock(&engine->breadcrumbs.irq_lock); 120156299fb7SChris Wilson 120224754d75SChris Wilson if (rq) { 1203e3be4079SChris Wilson spin_lock(&rq->lock); 1204e3be4079SChris Wilson dma_fence_signal_locked(&rq->fence); 12054e9a8befSChris Wilson GEM_BUG_ON(!i915_request_completed(rq)); 1206e3be4079SChris Wilson spin_unlock(&rq->lock); 1207e3be4079SChris Wilson 1208e61e0f51SChris Wilson i915_request_put(rq); 120924754d75SChris Wilson } 121056299fb7SChris Wilson 12113f88325cSChris Wilson if (tsk && tsk->state & TASK_NORMAL) 12123f88325cSChris Wilson wake_up_process(tsk); 12133f88325cSChris Wilson 12143f88325cSChris Wilson rcu_read_unlock(); 12153f88325cSChris Wilson 121656299fb7SChris Wilson trace_intel_engine_notify(engine, wait); 1217549f7365SChris Wilson } 1218549f7365SChris Wilson 121943cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv, 122043cf3bf0SChris Wilson struct intel_rps_ei *ei) 122131685c25SDeepak S { 1222679cb6c1SMika Kuoppala ei->ktime = ktime_get_raw(); 122343cf3bf0SChris Wilson ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 122443cf3bf0SChris Wilson ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 122531685c25SDeepak S } 122631685c25SDeepak S 122743cf3bf0SChris Wilson void 
gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 122843cf3bf0SChris Wilson { 1229562d9baeSSagar Arun Kamble memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 123043cf3bf0SChris Wilson } 123143cf3bf0SChris Wilson 123243cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 123343cf3bf0SChris Wilson { 1234562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1235562d9baeSSagar Arun Kamble const struct intel_rps_ei *prev = &rps->ei; 123643cf3bf0SChris Wilson struct intel_rps_ei now; 123743cf3bf0SChris Wilson u32 events = 0; 123843cf3bf0SChris Wilson 1239e0e8c7cbSChris Wilson if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 124043cf3bf0SChris Wilson return 0; 124143cf3bf0SChris Wilson 124243cf3bf0SChris Wilson vlv_c0_read(dev_priv, &now); 124331685c25SDeepak S 1244679cb6c1SMika Kuoppala if (prev->ktime) { 1245e0e8c7cbSChris Wilson u64 time, c0; 1246569884e3SChris Wilson u32 render, media; 1247e0e8c7cbSChris Wilson 1248679cb6c1SMika Kuoppala time = ktime_us_delta(now.ktime, prev->ktime); 12498f68d591SChris Wilson 1250e0e8c7cbSChris Wilson time *= dev_priv->czclk_freq; 1251e0e8c7cbSChris Wilson 1252e0e8c7cbSChris Wilson /* Workload can be split between render + media, 1253e0e8c7cbSChris Wilson * e.g. SwapBuffers being blitted in X after being rendered in 1254e0e8c7cbSChris Wilson * mesa. To account for this we need to combine both engines 1255e0e8c7cbSChris Wilson * into our activity counter. 1256e0e8c7cbSChris Wilson */ 1257569884e3SChris Wilson render = now.render_c0 - prev->render_c0; 1258569884e3SChris Wilson media = now.media_c0 - prev->media_c0; 1259569884e3SChris Wilson c0 = max(render, media); 12606b7f6aa7SMika Kuoppala c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1261e0e8c7cbSChris Wilson 126260548c55SChris Wilson if (c0 > time * rps->power.up_threshold) 1263e0e8c7cbSChris Wilson events = GEN6_PM_RP_UP_THRESHOLD; 126460548c55SChris Wilson else if (c0 < time * rps->power.down_threshold) 1265e0e8c7cbSChris Wilson events = GEN6_PM_RP_DOWN_THRESHOLD; 126631685c25SDeepak S } 126731685c25SDeepak S 1268562d9baeSSagar Arun Kamble rps->ei = now; 126943cf3bf0SChris Wilson return events; 127031685c25SDeepak S } 127131685c25SDeepak S 12724912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work) 12733b8d8d91SJesse Barnes { 12742d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1275562d9baeSSagar Arun Kamble container_of(work, struct drm_i915_private, gt_pm.rps.work); 1276562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 12777c0a16adSChris Wilson bool client_boost = false; 12788d3afd7dSChris Wilson int new_delay, adj, min, max; 12797c0a16adSChris Wilson u32 pm_iir = 0; 12803b8d8d91SJesse Barnes 128159cdb63dSDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 1282562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1283562d9baeSSagar Arun Kamble pm_iir = fetch_and_zero(&rps->pm_iir); 1284562d9baeSSagar Arun Kamble client_boost = atomic_read(&rps->num_waiters); 1285d4d70aa5SImre Deak } 128659cdb63dSDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 12874912d041SBen Widawsky 128860611c13SPaulo Zanoni /* Make sure we didn't queue anything we're not going to process. 
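/*
 * Editorial sketch, not part of the driver: the decision made at the end of
 * vlv_wa_c0_ei() above, stripped of the CZ-clock and fixed-point scaling.
 * The counters and percentage thresholds here are hypothetical; the point is
 * only that the busiest engine's C0 residency is compared against a
 * percentage of the elapsed evaluation interval to pick an up/down event.
 */
static u32 example_c0_threshold_events(u64 busy_ticks, u64 interval_ticks,
				       unsigned int up_percent,
				       unsigned int down_percent)
{
	if (busy_ticks * 100 > interval_ticks * up_percent)
		return GEN6_PM_RP_UP_THRESHOLD;
	if (busy_ticks * 100 < interval_ticks * down_percent)
		return GEN6_PM_RP_DOWN_THRESHOLD;
	return 0;
}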
*/ 1289a6706b45SDeepak S WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 12908d3afd7dSChris Wilson if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 12917c0a16adSChris Wilson goto out; 12923b8d8d91SJesse Barnes 12939f817501SSagar Arun Kamble mutex_lock(&dev_priv->pcu_lock); 12947b9e0ae6SChris Wilson 129543cf3bf0SChris Wilson pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 129643cf3bf0SChris Wilson 1297562d9baeSSagar Arun Kamble adj = rps->last_adj; 1298562d9baeSSagar Arun Kamble new_delay = rps->cur_freq; 1299562d9baeSSagar Arun Kamble min = rps->min_freq_softlimit; 1300562d9baeSSagar Arun Kamble max = rps->max_freq_softlimit; 13017b92c1bdSChris Wilson if (client_boost) 1302562d9baeSSagar Arun Kamble max = rps->max_freq; 1303562d9baeSSagar Arun Kamble if (client_boost && new_delay < rps->boost_freq) { 1304562d9baeSSagar Arun Kamble new_delay = rps->boost_freq; 13058d3afd7dSChris Wilson adj = 0; 13068d3afd7dSChris Wilson } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1307dd75fdc8SChris Wilson if (adj > 0) 1308dd75fdc8SChris Wilson adj *= 2; 1309edcf284bSChris Wilson else /* CHV needs even encode values */ 1310edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 13117e79a683SSagar Arun Kamble 1312562d9baeSSagar Arun Kamble if (new_delay >= rps->max_freq_softlimit) 13137e79a683SSagar Arun Kamble adj = 0; 13147b92c1bdSChris Wilson } else if (client_boost) { 1315f5a4c67dSChris Wilson adj = 0; 1316dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1317562d9baeSSagar Arun Kamble if (rps->cur_freq > rps->efficient_freq) 1318562d9baeSSagar Arun Kamble new_delay = rps->efficient_freq; 1319562d9baeSSagar Arun Kamble else if (rps->cur_freq > rps->min_freq_softlimit) 1320562d9baeSSagar Arun Kamble new_delay = rps->min_freq_softlimit; 1321dd75fdc8SChris Wilson adj = 0; 1322dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1323dd75fdc8SChris Wilson if (adj < 0) 1324dd75fdc8SChris Wilson adj *= 2; 1325edcf284bSChris Wilson else /* CHV needs even encode values */ 1326edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? 
-2 : -1; 13277e79a683SSagar Arun Kamble 1328562d9baeSSagar Arun Kamble if (new_delay <= rps->min_freq_softlimit) 13297e79a683SSagar Arun Kamble adj = 0; 1330dd75fdc8SChris Wilson } else { /* unknown event */ 1331edcf284bSChris Wilson adj = 0; 1332dd75fdc8SChris Wilson } 13333b8d8d91SJesse Barnes 1334562d9baeSSagar Arun Kamble rps->last_adj = adj; 1335edcf284bSChris Wilson 133679249636SBen Widawsky /* sysfs frequency interfaces may have snuck in while servicing the 133779249636SBen Widawsky * interrupt 133879249636SBen Widawsky */ 1339edcf284bSChris Wilson new_delay += adj; 13408d3afd7dSChris Wilson new_delay = clamp_t(int, new_delay, min, max); 134127544369SDeepak S 13429fcee2f7SChris Wilson if (intel_set_rps(dev_priv, new_delay)) { 13439fcee2f7SChris Wilson DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1344562d9baeSSagar Arun Kamble rps->last_adj = 0; 13459fcee2f7SChris Wilson } 13463b8d8d91SJesse Barnes 13479f817501SSagar Arun Kamble mutex_unlock(&dev_priv->pcu_lock); 13487c0a16adSChris Wilson 13497c0a16adSChris Wilson out: 13507c0a16adSChris Wilson /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 13517c0a16adSChris Wilson spin_lock_irq(&dev_priv->irq_lock); 1352562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) 13537c0a16adSChris Wilson gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 13547c0a16adSChris Wilson spin_unlock_irq(&dev_priv->irq_lock); 13553b8d8d91SJesse Barnes } 13563b8d8d91SJesse Barnes 1357e3689190SBen Widawsky 1358e3689190SBen Widawsky /** 1359e3689190SBen Widawsky * ivybridge_parity_work - Workqueue called when a parity error interrupt 1360e3689190SBen Widawsky * occurred. 1361e3689190SBen Widawsky * @work: workqueue struct 1362e3689190SBen Widawsky * 1363e3689190SBen Widawsky * Doesn't actually do anything except notify userspace. As a consequence of 1364e3689190SBen Widawsky * this event, userspace should try to remap the bad rows since statistically 1365e3689190SBen Widawsky * it is likely the same row is more likely to go bad again. 1366e3689190SBen Widawsky */ 1367e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work) 1368e3689190SBen Widawsky { 13692d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1370cefcff8fSJoonas Lahtinen container_of(work, typeof(*dev_priv), l3_parity.error_work); 1371e3689190SBen Widawsky u32 error_status, row, bank, subbank; 137235a85ac6SBen Widawsky char *parity_event[6]; 1373e3689190SBen Widawsky uint32_t misccpctl; 137435a85ac6SBen Widawsky uint8_t slice = 0; 1375e3689190SBen Widawsky 1376e3689190SBen Widawsky /* We must turn off DOP level clock gating to access the L3 registers. 1377e3689190SBen Widawsky * In order to prevent a get/put style interface, acquire struct mutex 1378e3689190SBen Widawsky * any time we access those registers. 
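/*
 * Editorial sketch, not part of the driver: the frequency stepping policy of
 * gen6_pm_rps_work() above reduced to one helper. The step size doubles
 * while successive interrupts keep pushing in the same direction, resets to
 * the smallest step when the direction changes, and the result is clamped
 * to the soft limits. 'needs_even_step' is a stand-in for the Cherryview
 * requirement of even encoded values.
 */
static int example_rps_step(int cur_freq, int last_adj, bool up,
			    bool needs_even_step, int min, int max)
{
	int adj;

	if (up)
		adj = last_adj > 0 ? last_adj * 2 : (needs_even_step ? 2 : 1);
	else
		adj = last_adj < 0 ? last_adj * 2 : (needs_even_step ? -2 : -1);

	return clamp_t(int, cur_freq + adj, min, max);
}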
1379e3689190SBen Widawsky */ 138091c8a326SChris Wilson mutex_lock(&dev_priv->drm.struct_mutex); 1381e3689190SBen Widawsky 138235a85ac6SBen Widawsky /* If we've screwed up tracking, just let the interrupt fire again */ 138335a85ac6SBen Widawsky if (WARN_ON(!dev_priv->l3_parity.which_slice)) 138435a85ac6SBen Widawsky goto out; 138535a85ac6SBen Widawsky 1386e3689190SBen Widawsky misccpctl = I915_READ(GEN7_MISCCPCTL); 1387e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1388e3689190SBen Widawsky POSTING_READ(GEN7_MISCCPCTL); 1389e3689190SBen Widawsky 139035a85ac6SBen Widawsky while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1391f0f59a00SVille Syrjälä i915_reg_t reg; 139235a85ac6SBen Widawsky 139335a85ac6SBen Widawsky slice--; 13942d1fe073SJoonas Lahtinen if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 139535a85ac6SBen Widawsky break; 139635a85ac6SBen Widawsky 139735a85ac6SBen Widawsky dev_priv->l3_parity.which_slice &= ~(1<<slice); 139835a85ac6SBen Widawsky 13996fa1c5f1SVille Syrjälä reg = GEN7_L3CDERRST1(slice); 140035a85ac6SBen Widawsky 140135a85ac6SBen Widawsky error_status = I915_READ(reg); 1402e3689190SBen Widawsky row = GEN7_PARITY_ERROR_ROW(error_status); 1403e3689190SBen Widawsky bank = GEN7_PARITY_ERROR_BANK(error_status); 1404e3689190SBen Widawsky subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1405e3689190SBen Widawsky 140635a85ac6SBen Widawsky I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 140735a85ac6SBen Widawsky POSTING_READ(reg); 1408e3689190SBen Widawsky 1409cce723edSBen Widawsky parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1410e3689190SBen Widawsky parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1411e3689190SBen Widawsky parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1412e3689190SBen Widawsky parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 141335a85ac6SBen Widawsky parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 141435a85ac6SBen Widawsky parity_event[5] = NULL; 1415e3689190SBen Widawsky 141691c8a326SChris Wilson kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1417e3689190SBen Widawsky KOBJ_CHANGE, parity_event); 1418e3689190SBen Widawsky 141935a85ac6SBen Widawsky DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 142035a85ac6SBen Widawsky slice, row, bank, subbank); 1421e3689190SBen Widawsky 142235a85ac6SBen Widawsky kfree(parity_event[4]); 1423e3689190SBen Widawsky kfree(parity_event[3]); 1424e3689190SBen Widawsky kfree(parity_event[2]); 1425e3689190SBen Widawsky kfree(parity_event[1]); 1426e3689190SBen Widawsky } 1427e3689190SBen Widawsky 142835a85ac6SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl); 142935a85ac6SBen Widawsky 143035a85ac6SBen Widawsky out: 143135a85ac6SBen Widawsky WARN_ON(dev_priv->l3_parity.which_slice); 14324cb21832SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 14332d1fe073SJoonas Lahtinen gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 14344cb21832SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 143535a85ac6SBen Widawsky 143691c8a326SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 143735a85ac6SBen Widawsky } 143835a85ac6SBen Widawsky 1439261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1440261e40b8SVille Syrjälä u32 iir) 1441e3689190SBen Widawsky { 1442261e40b8SVille Syrjälä if (!HAS_L3_DPF(dev_priv)) 1443e3689190SBen Widawsky return; 1444e3689190SBen Widawsky 1445d0ecd7e2SDaniel Vetter 
spin_lock(&dev_priv->irq_lock); 1446261e40b8SVille Syrjälä gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1447d0ecd7e2SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 1448e3689190SBen Widawsky 1449261e40b8SVille Syrjälä iir &= GT_PARITY_ERROR(dev_priv); 145035a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 145135a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 1; 145235a85ac6SBen Widawsky 145335a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 145435a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 0; 145535a85ac6SBen Widawsky 1456a4da4fa4SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1457e3689190SBen Widawsky } 1458e3689190SBen Widawsky 1459261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1460f1af8fc1SPaulo Zanoni u32 gt_iir) 1461f1af8fc1SPaulo Zanoni { 1462f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14633b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1464f1af8fc1SPaulo Zanoni if (gt_iir & ILK_BSD_USER_INTERRUPT) 14653b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1466f1af8fc1SPaulo Zanoni } 1467f1af8fc1SPaulo Zanoni 1468261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1469e7b4c6b1SDaniel Vetter u32 gt_iir) 1470e7b4c6b1SDaniel Vetter { 1471f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14723b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1473cc609d5dSBen Widawsky if (gt_iir & GT_BSD_USER_INTERRUPT) 14743b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1475cc609d5dSBen Widawsky if (gt_iir & GT_BLT_USER_INTERRUPT) 14763b3f1650SAkash Goel notify_ring(dev_priv->engine[BCS]); 1477e7b4c6b1SDaniel Vetter 1478cc609d5dSBen Widawsky if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1479cc609d5dSBen Widawsky GT_BSD_CS_ERROR_INTERRUPT | 1480aaecdf61SDaniel Vetter GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1481aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1482e3689190SBen Widawsky 1483261e40b8SVille Syrjälä if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1484261e40b8SVille Syrjälä ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1485e7b4c6b1SDaniel Vetter } 1486e7b4c6b1SDaniel Vetter 14875d3d69d5SChris Wilson static void 148851f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1489fbcc1a0cSNick Hoath { 149031de7350SChris Wilson bool tasklet = false; 1491f747026cSChris Wilson 1492fd8526e5SChris Wilson if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 14938ea397faSChris Wilson tasklet = true; 149431de7350SChris Wilson 149551f6b0f9SChris Wilson if (iir & GT_RENDER_USER_INTERRUPT) { 149631de7350SChris Wilson notify_ring(engine); 149793ffbe8eSMichal Wajdeczko tasklet |= USES_GUC_SUBMISSION(engine->i915); 149831de7350SChris Wilson } 149931de7350SChris Wilson 150031de7350SChris Wilson if (tasklet) 1501fd8526e5SChris Wilson tasklet_hi_schedule(&engine->execlists.tasklet); 1502fbcc1a0cSNick Hoath } 1503fbcc1a0cSNick Hoath 15042e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915, 150555ef72f2SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1506abd58f01SBen Widawsky { 15072e4a5b25SChris Wilson void __iomem * const regs = i915->regs; 15082e4a5b25SChris Wilson 1509f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1510f0fd96f5SChris Wilson GEN8_GT_BCS_IRQ | \ 1511f0fd96f5SChris Wilson GEN8_GT_VCS1_IRQ | \ 1512f0fd96f5SChris Wilson GEN8_GT_VCS2_IRQ | \ 1513f0fd96f5SChris Wilson GEN8_GT_VECS_IRQ | \ 
1514f0fd96f5SChris Wilson GEN8_GT_PM_IRQ | \ 1515f0fd96f5SChris Wilson GEN8_GT_GUC_IRQ) 1516f0fd96f5SChris Wilson 1517abd58f01SBen Widawsky if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15182e4a5b25SChris Wilson gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 15192e4a5b25SChris Wilson if (likely(gt_iir[0])) 15202e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1521abd58f01SBen Widawsky } 1522abd58f01SBen Widawsky 152385f9b5f9SZhao Yakui if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15242e4a5b25SChris Wilson gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 15252e4a5b25SChris Wilson if (likely(gt_iir[1])) 15262e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 152774cdb337SChris Wilson } 152874cdb337SChris Wilson 152926705e20SSagar Arun Kamble if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15302e4a5b25SChris Wilson gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1531f4de7794SChris Wilson if (likely(gt_iir[2])) 1532f4de7794SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 15330961021aSBen Widawsky } 15342e4a5b25SChris Wilson 15352e4a5b25SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15362e4a5b25SChris Wilson gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 15372e4a5b25SChris Wilson if (likely(gt_iir[3])) 15382e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 153955ef72f2SChris Wilson } 1540abd58f01SBen Widawsky } 1541abd58f01SBen Widawsky 15422e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1543f0fd96f5SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1544e30e251aSVille Syrjälä { 1545f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15462e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[RCS], 154751f6b0f9SChris Wilson gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 15482e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[BCS], 154951f6b0f9SChris Wilson gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1550e30e251aSVille Syrjälä } 1551e30e251aSVille Syrjälä 1552f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15532e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS], 155451f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 15552e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS2], 155651f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1557e30e251aSVille Syrjälä } 1558e30e251aSVille Syrjälä 1559f0fd96f5SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15602e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VECS], 156151f6b0f9SChris Wilson gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1562f0fd96f5SChris Wilson } 1563e30e251aSVille Syrjälä 1564f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15652e4a5b25SChris Wilson gen6_rps_irq_handler(i915, gt_iir[2]); 15662e4a5b25SChris Wilson gen9_guc_irq_handler(i915, gt_iir[2]); 1567e30e251aSVille Syrjälä } 1568f0fd96f5SChris Wilson } 1569e30e251aSVille Syrjälä 1570af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1571121e758eSDhinakaran Pandiyan { 1572af92058fSVille Syrjälä switch (pin) { 1573af92058fSVille Syrjälä case HPD_PORT_C: 1574121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1575af92058fSVille Syrjälä case HPD_PORT_D: 1576121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1577af92058fSVille Syrjälä case HPD_PORT_E: 1578121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1579af92058fSVille Syrjälä case 
HPD_PORT_F: 1580121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1581121e758eSDhinakaran Pandiyan default: 1582121e758eSDhinakaran Pandiyan return false; 1583121e758eSDhinakaran Pandiyan } 1584121e758eSDhinakaran Pandiyan } 1585121e758eSDhinakaran Pandiyan 1586af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 158763c88d22SImre Deak { 1588af92058fSVille Syrjälä switch (pin) { 1589af92058fSVille Syrjälä case HPD_PORT_A: 1590195baa06SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 1591af92058fSVille Syrjälä case HPD_PORT_B: 159263c88d22SImre Deak return val & PORTB_HOTPLUG_LONG_DETECT; 1593af92058fSVille Syrjälä case HPD_PORT_C: 159463c88d22SImre Deak return val & PORTC_HOTPLUG_LONG_DETECT; 159563c88d22SImre Deak default: 159663c88d22SImre Deak return false; 159763c88d22SImre Deak } 159863c88d22SImre Deak } 159963c88d22SImre Deak 1600af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 160131604222SAnusha Srivatsa { 1602af92058fSVille Syrjälä switch (pin) { 1603af92058fSVille Syrjälä case HPD_PORT_A: 160431604222SAnusha Srivatsa return val & ICP_DDIA_HPD_LONG_DETECT; 1605af92058fSVille Syrjälä case HPD_PORT_B: 160631604222SAnusha Srivatsa return val & ICP_DDIB_HPD_LONG_DETECT; 160731604222SAnusha Srivatsa default: 160831604222SAnusha Srivatsa return false; 160931604222SAnusha Srivatsa } 161031604222SAnusha Srivatsa } 161131604222SAnusha Srivatsa 1612af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 161331604222SAnusha Srivatsa { 1614af92058fSVille Syrjälä switch (pin) { 1615af92058fSVille Syrjälä case HPD_PORT_C: 161631604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1617af92058fSVille Syrjälä case HPD_PORT_D: 161831604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1619af92058fSVille Syrjälä case HPD_PORT_E: 162031604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1621af92058fSVille Syrjälä case HPD_PORT_F: 162231604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 162331604222SAnusha Srivatsa default: 162431604222SAnusha Srivatsa return false; 162531604222SAnusha Srivatsa } 162631604222SAnusha Srivatsa } 162731604222SAnusha Srivatsa 1628af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 16296dbf30ceSVille Syrjälä { 1630af92058fSVille Syrjälä switch (pin) { 1631af92058fSVille Syrjälä case HPD_PORT_E: 16326dbf30ceSVille Syrjälä return val & PORTE_HOTPLUG_LONG_DETECT; 16336dbf30ceSVille Syrjälä default: 16346dbf30ceSVille Syrjälä return false; 16356dbf30ceSVille Syrjälä } 16366dbf30ceSVille Syrjälä } 16376dbf30ceSVille Syrjälä 1638af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 163974c0b395SVille Syrjälä { 1640af92058fSVille Syrjälä switch (pin) { 1641af92058fSVille Syrjälä case HPD_PORT_A: 164274c0b395SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 1643af92058fSVille Syrjälä case HPD_PORT_B: 164474c0b395SVille Syrjälä return val & PORTB_HOTPLUG_LONG_DETECT; 1645af92058fSVille Syrjälä case HPD_PORT_C: 164674c0b395SVille Syrjälä return val & PORTC_HOTPLUG_LONG_DETECT; 1647af92058fSVille Syrjälä case HPD_PORT_D: 164874c0b395SVille Syrjälä return val & PORTD_HOTPLUG_LONG_DETECT; 164974c0b395SVille Syrjälä default: 165074c0b395SVille Syrjälä return false; 165174c0b395SVille Syrjälä } 165274c0b395SVille Syrjälä } 165374c0b395SVille Syrjälä 1654af92058fSVille Syrjälä 
static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1655e4ce95aaSVille Syrjälä { 1656af92058fSVille Syrjälä switch (pin) { 1657af92058fSVille Syrjälä case HPD_PORT_A: 1658e4ce95aaSVille Syrjälä return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1659e4ce95aaSVille Syrjälä default: 1660e4ce95aaSVille Syrjälä return false; 1661e4ce95aaSVille Syrjälä } 1662e4ce95aaSVille Syrjälä } 1663e4ce95aaSVille Syrjälä 1664af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 166513cf5504SDave Airlie { 1666af92058fSVille Syrjälä switch (pin) { 1667af92058fSVille Syrjälä case HPD_PORT_B: 1668676574dfSJani Nikula return val & PORTB_HOTPLUG_LONG_DETECT; 1669af92058fSVille Syrjälä case HPD_PORT_C: 1670676574dfSJani Nikula return val & PORTC_HOTPLUG_LONG_DETECT; 1671af92058fSVille Syrjälä case HPD_PORT_D: 1672676574dfSJani Nikula return val & PORTD_HOTPLUG_LONG_DETECT; 1673676574dfSJani Nikula default: 1674676574dfSJani Nikula return false; 167513cf5504SDave Airlie } 167613cf5504SDave Airlie } 167713cf5504SDave Airlie 1678af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 167913cf5504SDave Airlie { 1680af92058fSVille Syrjälä switch (pin) { 1681af92058fSVille Syrjälä case HPD_PORT_B: 1682676574dfSJani Nikula return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1683af92058fSVille Syrjälä case HPD_PORT_C: 1684676574dfSJani Nikula return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1685af92058fSVille Syrjälä case HPD_PORT_D: 1686676574dfSJani Nikula return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1687676574dfSJani Nikula default: 1688676574dfSJani Nikula return false; 168913cf5504SDave Airlie } 169013cf5504SDave Airlie } 169113cf5504SDave Airlie 169242db67d6SVille Syrjälä /* 169342db67d6SVille Syrjälä * Get a bit mask of pins that have triggered, and which ones may be long. 169442db67d6SVille Syrjälä * This can be called multiple times with the same masks to accumulate 169542db67d6SVille Syrjälä * hotplug detection results from several registers. 169642db67d6SVille Syrjälä * 169742db67d6SVille Syrjälä * Note that the caller is expected to zero out the masks initially. 
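/*
 * Editorial sketch, not part of the driver: the shape of the accumulation
 * that the comment above describes. A platform-specific long_pulse_detect()
 * callback (such as the *_long_detect() helpers above) decides per pin
 * whether the pulse was "long"; the caller zeroes both masks once and may
 * run this over several trigger/status register pairs, OR-ing the results
 * together. The helper name and the plain callback parameter are
 * illustrative only.
 */
static void example_accumulate_hpd_pins(u32 *pin_mask, u32 *long_mask,
					u32 hotplug_trigger,
					u32 dig_hotplug_reg,
					const u32 hpd[HPD_NUM_PINS],
					bool (*long_pulse_detect)(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}
}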
169842db67d6SVille Syrjälä */ 1699cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1700cf53902fSRodrigo Vivi u32 *pin_mask, u32 *long_mask, 17018c841e57SJani Nikula u32 hotplug_trigger, u32 dig_hotplug_reg, 1702fd63e2a9SImre Deak const u32 hpd[HPD_NUM_PINS], 1703af92058fSVille Syrjälä bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1704676574dfSJani Nikula { 1705e9be2850SVille Syrjälä enum hpd_pin pin; 1706676574dfSJani Nikula 1707e9be2850SVille Syrjälä for_each_hpd_pin(pin) { 1708e9be2850SVille Syrjälä if ((hpd[pin] & hotplug_trigger) == 0) 17098c841e57SJani Nikula continue; 17108c841e57SJani Nikula 1711e9be2850SVille Syrjälä *pin_mask |= BIT(pin); 1712676574dfSJani Nikula 1713af92058fSVille Syrjälä if (long_pulse_detect(pin, dig_hotplug_reg)) 1714e9be2850SVille Syrjälä *long_mask |= BIT(pin); 1715676574dfSJani Nikula } 1716676574dfSJani Nikula 1717f88f0478SVille Syrjälä DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1718f88f0478SVille Syrjälä hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1719676574dfSJani Nikula 1720676574dfSJani Nikula } 1721676574dfSJani Nikula 172291d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1723515ac2bbSDaniel Vetter { 172428c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1725515ac2bbSDaniel Vetter } 1726515ac2bbSDaniel Vetter 172791d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1728ce99c256SDaniel Vetter { 17299ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1730ce99c256SDaniel Vetter } 1731ce99c256SDaniel Vetter 17328bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS) 173391d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 173491d14251STvrtko Ursulin enum pipe pipe, 1735eba94eb9SDaniel Vetter uint32_t crc0, uint32_t crc1, 1736eba94eb9SDaniel Vetter uint32_t crc2, uint32_t crc3, 17378bc5e955SDaniel Vetter uint32_t crc4) 17388bf1e9f1SShuang He { 17398bf1e9f1SShuang He struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 17408c6b709dSTomeu Vizoso struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17418c6b709dSTomeu Vizoso uint32_t crcs[5]; 1742b2c88f5bSDamien Lespiau 1743d538bbdfSDamien Lespiau spin_lock(&pipe_crc->lock); 17448c6b709dSTomeu Vizoso /* 17458c6b709dSTomeu Vizoso * For some not yet identified reason, the first CRC is 17468c6b709dSTomeu Vizoso * bonkers. So let's just wait for the next vblank and read 17478c6b709dSTomeu Vizoso * out the buggy result. 17488c6b709dSTomeu Vizoso * 1749163e8aecSRodrigo Vivi * On GEN8+ sometimes the second CRC is bonkers as well, so 17508c6b709dSTomeu Vizoso * don't trust that one either. 
17518c6b709dSTomeu Vizoso */ 1752033b7a23SMaarten Lankhorst if (pipe_crc->skipped <= 0 || 1753163e8aecSRodrigo Vivi (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 17548c6b709dSTomeu Vizoso pipe_crc->skipped++; 17558c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17568c6b709dSTomeu Vizoso return; 17578c6b709dSTomeu Vizoso } 17588c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17596cc42152SMaarten Lankhorst 17608c6b709dSTomeu Vizoso crcs[0] = crc0; 17618c6b709dSTomeu Vizoso crcs[1] = crc1; 17628c6b709dSTomeu Vizoso crcs[2] = crc2; 17638c6b709dSTomeu Vizoso crcs[3] = crc3; 17648c6b709dSTomeu Vizoso crcs[4] = crc4; 1765246ee524STomeu Vizoso drm_crtc_add_crc_entry(&crtc->base, true, 1766ca814b25SDaniel Vetter drm_crtc_accurate_vblank_count(&crtc->base), 1767246ee524STomeu Vizoso crcs); 17688c6b709dSTomeu Vizoso } 1769277de95eSDaniel Vetter #else 1770277de95eSDaniel Vetter static inline void 177191d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 177291d14251STvrtko Ursulin enum pipe pipe, 1773277de95eSDaniel Vetter uint32_t crc0, uint32_t crc1, 1774277de95eSDaniel Vetter uint32_t crc2, uint32_t crc3, 1775277de95eSDaniel Vetter uint32_t crc4) {} 1776277de95eSDaniel Vetter #endif 1777eba94eb9SDaniel Vetter 1778277de95eSDaniel Vetter 177991d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 178091d14251STvrtko Ursulin enum pipe pipe) 17815a69b89fSDaniel Vetter { 178291d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 17835a69b89fSDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 17845a69b89fSDaniel Vetter 0, 0, 0, 0); 17855a69b89fSDaniel Vetter } 17865a69b89fSDaniel Vetter 178791d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 178891d14251STvrtko Ursulin enum pipe pipe) 1789eba94eb9SDaniel Vetter { 179091d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 1791eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1792eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1793eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1794eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 17958bc5e955SDaniel Vetter I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1796eba94eb9SDaniel Vetter } 17975b3a856bSDaniel Vetter 179891d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 179991d14251STvrtko Ursulin enum pipe pipe) 18005b3a856bSDaniel Vetter { 18010b5c5ed0SDaniel Vetter uint32_t res1, res2; 18020b5c5ed0SDaniel Vetter 180391d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 3) 18040b5c5ed0SDaniel Vetter res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 18050b5c5ed0SDaniel Vetter else 18060b5c5ed0SDaniel Vetter res1 = 0; 18070b5c5ed0SDaniel Vetter 180891d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 18090b5c5ed0SDaniel Vetter res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 18100b5c5ed0SDaniel Vetter else 18110b5c5ed0SDaniel Vetter res2 = 0; 18125b3a856bSDaniel Vetter 181391d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 18140b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_RED(pipe)), 18150b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_GREEN(pipe)), 18160b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_BLUE(pipe)), 18170b5c5ed0SDaniel Vetter res1, res2); 18185b3a856bSDaniel Vetter } 18198bf1e9f1SShuang He 18201403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 18211403c0d4SPaulo Zanoni * IMR bits until 
the work is done. Other interrupts can be processed without 18221403c0d4SPaulo Zanoni * the work queue. */ 18231403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1824baf02a1fSBen Widawsky { 1825562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1826562d9baeSSagar Arun Kamble 1827a6706b45SDeepak S if (pm_iir & dev_priv->pm_rps_events) { 182859cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 1829f4e9af4fSAkash Goel gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1830562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1831562d9baeSSagar Arun Kamble rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1832562d9baeSSagar Arun Kamble schedule_work(&rps->work); 183341a05a3aSDaniel Vetter } 1834d4d70aa5SImre Deak spin_unlock(&dev_priv->irq_lock); 1835d4d70aa5SImre Deak } 1836baf02a1fSBen Widawsky 1837bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8) 1838c9a9a268SImre Deak return; 1839c9a9a268SImre Deak 18402d1fe073SJoonas Lahtinen if (HAS_VEBOX(dev_priv)) { 184112638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 18423b3f1650SAkash Goel notify_ring(dev_priv->engine[VECS]); 184312638c57SBen Widawsky 1844aaecdf61SDaniel Vetter if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1845aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 184612638c57SBen Widawsky } 18471403c0d4SPaulo Zanoni } 1848baf02a1fSBen Widawsky 184926705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 185026705e20SSagar Arun Kamble { 185193bf8096SMichal Wajdeczko if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 185293bf8096SMichal Wajdeczko intel_guc_to_host_event_handler(&dev_priv->guc); 185326705e20SSagar Arun Kamble } 185426705e20SSagar Arun Kamble 185544d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 185644d9241eSVille Syrjälä { 185744d9241eSVille Syrjälä enum pipe pipe; 185844d9241eSVille Syrjälä 185944d9241eSVille Syrjälä for_each_pipe(dev_priv, pipe) { 186044d9241eSVille Syrjälä I915_WRITE(PIPESTAT(pipe), 186144d9241eSVille Syrjälä PIPESTAT_INT_STATUS_MASK | 186244d9241eSVille Syrjälä PIPE_FIFO_UNDERRUN_STATUS); 186344d9241eSVille Syrjälä 186444d9241eSVille Syrjälä dev_priv->pipestat_irq_mask[pipe] = 0; 186544d9241eSVille Syrjälä } 186644d9241eSVille Syrjälä } 186744d9241eSVille Syrjälä 1868eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 186991d14251STvrtko Ursulin u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 18707e231dbeSJesse Barnes { 18717e231dbeSJesse Barnes int pipe; 18727e231dbeSJesse Barnes 187358ead0d7SImre Deak spin_lock(&dev_priv->irq_lock); 18741ca993d2SVille Syrjälä 18751ca993d2SVille Syrjälä if (!dev_priv->display_irqs_enabled) { 18761ca993d2SVille Syrjälä spin_unlock(&dev_priv->irq_lock); 18771ca993d2SVille Syrjälä return; 18781ca993d2SVille Syrjälä } 18791ca993d2SVille Syrjälä 1880055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 1881f0f59a00SVille Syrjälä i915_reg_t reg; 18826b12ca56SVille Syrjälä u32 status_mask, enable_mask, iir_bit = 0; 188391d181ddSImre Deak 1884bbb5eebfSDaniel Vetter /* 1885bbb5eebfSDaniel Vetter * PIPESTAT bits get signalled even when the interrupt is 1886bbb5eebfSDaniel Vetter * disabled with the mask bits, and some of the status bits do 1887bbb5eebfSDaniel Vetter * not generate interrupts at all (like the underrun bit). 
Hence 1888bbb5eebfSDaniel Vetter * we need to be careful that we only handle what we want to 1889bbb5eebfSDaniel Vetter * handle. 1890bbb5eebfSDaniel Vetter */ 18910f239f4cSDaniel Vetter 18920f239f4cSDaniel Vetter /* fifo underruns are filtered in the underrun handler. */ 18936b12ca56SVille Syrjälä status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1894bbb5eebfSDaniel Vetter 1895bbb5eebfSDaniel Vetter switch (pipe) { 1896bbb5eebfSDaniel Vetter case PIPE_A: 1897bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1898bbb5eebfSDaniel Vetter break; 1899bbb5eebfSDaniel Vetter case PIPE_B: 1900bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1901bbb5eebfSDaniel Vetter break; 19023278f67fSVille Syrjälä case PIPE_C: 19033278f67fSVille Syrjälä iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 19043278f67fSVille Syrjälä break; 1905bbb5eebfSDaniel Vetter } 1906bbb5eebfSDaniel Vetter if (iir & iir_bit) 19076b12ca56SVille Syrjälä status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1908bbb5eebfSDaniel Vetter 19096b12ca56SVille Syrjälä if (!status_mask) 191091d181ddSImre Deak continue; 191191d181ddSImre Deak 191291d181ddSImre Deak reg = PIPESTAT(pipe); 19136b12ca56SVille Syrjälä pipe_stats[pipe] = I915_READ(reg) & status_mask; 19146b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 19157e231dbeSJesse Barnes 19167e231dbeSJesse Barnes /* 19177e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 1918132c27c9SVille Syrjälä * 1919132c27c9SVille Syrjälä * Toggle the enable bits to make sure we get an 1920132c27c9SVille Syrjälä * edge in the ISR pipe event bit if we don't clear 1921132c27c9SVille Syrjälä * all the enabled status bits. Otherwise the edge 1922132c27c9SVille Syrjälä * triggered IIR on i965/g4x wouldn't notice that 1923132c27c9SVille Syrjälä * an interrupt is still pending.
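/*
 * Editorial sketch, not part of the driver: the two-write ack sequence the
 * comment above motivates. Writing back the latched status bits clears them
 * and, since that value contains no enable bits, also drops the enables; the
 * second write restores the enable mask, and that 0->1 toggle is what gives
 * the edge-triggered IIR on i965/g4x a fresh edge if any status bit is still
 * asserted. 'write_pipestat' is a hypothetical stand-in for
 * I915_WRITE(PIPESTAT(pipe), ...).
 */
static void example_ack_pipestat(void (*write_pipestat)(u32 val),
				 u32 latched_status, u32 enable_mask)
{
	if (!latched_status)
		return;

	write_pipestat(latched_status);	/* clear the latched status bits */
	write_pipestat(enable_mask);	/* re-arm: toggles the enable bits */
}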
19247e231dbeSJesse Barnes */ 1925132c27c9SVille Syrjälä if (pipe_stats[pipe]) { 1926132c27c9SVille Syrjälä I915_WRITE(reg, pipe_stats[pipe]); 1927132c27c9SVille Syrjälä I915_WRITE(reg, enable_mask); 1928132c27c9SVille Syrjälä } 19297e231dbeSJesse Barnes } 193058ead0d7SImre Deak spin_unlock(&dev_priv->irq_lock); 19312ecb8ca4SVille Syrjälä } 19322ecb8ca4SVille Syrjälä 1933eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1934eb64343cSVille Syrjälä u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1935eb64343cSVille Syrjälä { 1936eb64343cSVille Syrjälä enum pipe pipe; 1937eb64343cSVille Syrjälä 1938eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1939eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1940eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1941eb64343cSVille Syrjälä 1942eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1943eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1944eb64343cSVille Syrjälä 1945eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1946eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1947eb64343cSVille Syrjälä } 1948eb64343cSVille Syrjälä } 1949eb64343cSVille Syrjälä 1950eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1951eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1952eb64343cSVille Syrjälä { 1953eb64343cSVille Syrjälä bool blc_event = false; 1954eb64343cSVille Syrjälä enum pipe pipe; 1955eb64343cSVille Syrjälä 1956eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1957eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1958eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1959eb64343cSVille Syrjälä 1960eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1961eb64343cSVille Syrjälä blc_event = true; 1962eb64343cSVille Syrjälä 1963eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1964eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1965eb64343cSVille Syrjälä 1966eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1967eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1968eb64343cSVille Syrjälä } 1969eb64343cSVille Syrjälä 1970eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1971eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 1972eb64343cSVille Syrjälä } 1973eb64343cSVille Syrjälä 1974eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1975eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1976eb64343cSVille Syrjälä { 1977eb64343cSVille Syrjälä bool blc_event = false; 1978eb64343cSVille Syrjälä enum pipe pipe; 1979eb64343cSVille Syrjälä 1980eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1981eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1982eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1983eb64343cSVille Syrjälä 1984eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1985eb64343cSVille Syrjälä blc_event = true; 1986eb64343cSVille Syrjälä 1987eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1988eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1989eb64343cSVille Syrjälä 1990eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 
1991eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1992eb64343cSVille Syrjälä } 1993eb64343cSVille Syrjälä 1994eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1995eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 1996eb64343cSVille Syrjälä 1997eb64343cSVille Syrjälä if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1998eb64343cSVille Syrjälä gmbus_irq_handler(dev_priv); 1999eb64343cSVille Syrjälä } 2000eb64343cSVille Syrjälä 200191d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 20022ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES]) 20032ecb8ca4SVille Syrjälä { 20042ecb8ca4SVille Syrjälä enum pipe pipe; 20057e231dbeSJesse Barnes 2006055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2007fd3a4024SDaniel Vetter if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2008fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 20094356d586SDaniel Vetter 20104356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 201191d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 20122d9d2b0bSVille Syrjälä 20131f7247c0SDaniel Vetter if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 20141f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 201531acc7f5SJesse Barnes } 201631acc7f5SJesse Barnes 2017c1874ed7SImre Deak if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 201891d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2019c1874ed7SImre Deak } 2020c1874ed7SImre Deak 20211ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 202216c6c56bSVille Syrjälä { 20230ba7c51aSVille Syrjälä u32 hotplug_status = 0, hotplug_status_mask; 20240ba7c51aSVille Syrjälä int i; 202516c6c56bSVille Syrjälä 20260ba7c51aSVille Syrjälä if (IS_G4X(dev_priv) || 20270ba7c51aSVille Syrjälä IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 20280ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 20290ba7c51aSVille Syrjälä DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 20300ba7c51aSVille Syrjälä else 20310ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 20320ba7c51aSVille Syrjälä 20330ba7c51aSVille Syrjälä /* 20340ba7c51aSVille Syrjälä * We absolutely have to clear all the pending interrupt 20350ba7c51aSVille Syrjälä * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 20360ba7c51aSVille Syrjälä * interrupt bit won't have an edge, and the i965/g4x 20370ba7c51aSVille Syrjälä * edge triggered IIR will not notice that an interrupt 20380ba7c51aSVille Syrjälä * is still pending. 
We can't use PORT_HOTPLUG_EN to 20390ba7c51aSVille Syrjälä * guarantee the edge as the act of toggling the enable 20400ba7c51aSVille Syrjälä * bits can itself generate a new hotplug interrupt :( 20410ba7c51aSVille Syrjälä */ 20420ba7c51aSVille Syrjälä for (i = 0; i < 10; i++) { 20430ba7c51aSVille Syrjälä u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 20440ba7c51aSVille Syrjälä 20450ba7c51aSVille Syrjälä if (tmp == 0) 20460ba7c51aSVille Syrjälä return hotplug_status; 20470ba7c51aSVille Syrjälä 20480ba7c51aSVille Syrjälä hotplug_status |= tmp; 20493ff60f89SOscar Mateo I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 20500ba7c51aSVille Syrjälä } 20510ba7c51aSVille Syrjälä 20520ba7c51aSVille Syrjälä WARN_ONCE(1, 20530ba7c51aSVille Syrjälä "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 20540ba7c51aSVille Syrjälä I915_READ(PORT_HOTPLUG_STAT)); 20551ae3c34cSVille Syrjälä 20561ae3c34cSVille Syrjälä return hotplug_status; 20571ae3c34cSVille Syrjälä } 20581ae3c34cSVille Syrjälä 205991d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 20601ae3c34cSVille Syrjälä u32 hotplug_status) 20611ae3c34cSVille Syrjälä { 20621ae3c34cSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 20633ff60f89SOscar Mateo 206491d14251STvrtko Ursulin if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 206591d14251STvrtko Ursulin IS_CHERRYVIEW(dev_priv)) { 206616c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 206716c6c56bSVille Syrjälä 206858f2cf24SVille Syrjälä if (hotplug_trigger) { 2069cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2070cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2071cf53902fSRodrigo Vivi hpd_status_g4x, 2072fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 207358f2cf24SVille Syrjälä 207491d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 207558f2cf24SVille Syrjälä } 2076369712e8SJani Nikula 2077369712e8SJani Nikula if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 207891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 207916c6c56bSVille Syrjälä } else { 208016c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 208116c6c56bSVille Syrjälä 208258f2cf24SVille Syrjälä if (hotplug_trigger) { 2083cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2084cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2085cf53902fSRodrigo Vivi hpd_status_i915, 2086fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 208791d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 208816c6c56bSVille Syrjälä } 20893ff60f89SOscar Mateo } 209058f2cf24SVille Syrjälä } 209116c6c56bSVille Syrjälä 2092c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2093c1874ed7SImre Deak { 209445a83f84SDaniel Vetter struct drm_device *dev = arg; 2095fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2096c1874ed7SImre Deak irqreturn_t ret = IRQ_NONE; 2097c1874ed7SImre Deak 20982dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 20992dd2a883SImre Deak return IRQ_NONE; 21002dd2a883SImre Deak 21011f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21021f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21031f814dacSImre Deak 21041e1cace9SVille Syrjälä do { 21056e814800SVille Syrjälä u32 iir, gt_iir, pm_iir; 21062ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21071ae3c34cSVille Syrjälä u32 hotplug_status = 0; 
2108a5e485a9SVille Syrjälä u32 ier = 0; 21093ff60f89SOscar Mateo 2110c1874ed7SImre Deak gt_iir = I915_READ(GTIIR); 2111c1874ed7SImre Deak pm_iir = I915_READ(GEN6_PMIIR); 21123ff60f89SOscar Mateo iir = I915_READ(VLV_IIR); 2113c1874ed7SImre Deak 2114c1874ed7SImre Deak if (gt_iir == 0 && pm_iir == 0 && iir == 0) 21151e1cace9SVille Syrjälä break; 2116c1874ed7SImre Deak 2117c1874ed7SImre Deak ret = IRQ_HANDLED; 2118c1874ed7SImre Deak 2119a5e485a9SVille Syrjälä /* 2120a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2121a5e485a9SVille Syrjälä * 2122a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2123a5e485a9SVille Syrjälä * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2124a5e485a9SVille Syrjälä * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2125a5e485a9SVille Syrjälä * 2126a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2127a5e485a9SVille Syrjälä * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2128a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2129a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2130a5e485a9SVille Syrjälä * bits this time around. 2131a5e485a9SVille Syrjälä */ 21324a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 2133a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2134a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 21354a0a0202SVille Syrjälä 21364a0a0202SVille Syrjälä if (gt_iir) 21374a0a0202SVille Syrjälä I915_WRITE(GTIIR, gt_iir); 21384a0a0202SVille Syrjälä if (pm_iir) 21394a0a0202SVille Syrjälä I915_WRITE(GEN6_PMIIR, pm_iir); 21404a0a0202SVille Syrjälä 21417ce4d1f2SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 21421ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 21437ce4d1f2SVille Syrjälä 21443ff60f89SOscar Mateo /* Call regardless, as some status bits might not be 21453ff60f89SOscar Mateo * signalled in iir */ 2146eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 21477ce4d1f2SVille Syrjälä 2148eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2149eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT)) 2150eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2151eef57324SJerome Anand 21527ce4d1f2SVille Syrjälä /* 21537ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 21547ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
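/*
 * Editorial sketch, not part of the driver: the empirical condition quoted
 * in the "theory on interrupt generation" comment above, written out as a
 * helper. A CPU interrupt is only raised on a 0->1 edge of this expression,
 * which is why the handler clears MASTER_INTERRUPT_ENABLE and VLV_IER before
 * acking and restores them afterwards: even if some IIR bits are left set,
 * restoring the enables forces the expression through 0 and back to 1 and so
 * produces a new edge.
 */
static bool example_vlv_irq_asserted(u32 vlv_iir, u32 vlv_ier,
				     u32 gt_iir, u32 gt_ier,
				     u32 pm_iir, u32 pm_ier,
				     u32 master_ier, u32 master_enable)
{
	return (vlv_iir & vlv_ier) ||
	       (((gt_iir & gt_ier) || (pm_iir & pm_ier)) &&
		(master_ier & master_enable));
}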
21557ce4d1f2SVille Syrjälä */ 21567ce4d1f2SVille Syrjälä if (iir) 21577ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 21584a0a0202SVille Syrjälä 2159a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 21604a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 21611ae3c34cSVille Syrjälä 216252894874SVille Syrjälä if (gt_iir) 2163261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 216452894874SVille Syrjälä if (pm_iir) 216552894874SVille Syrjälä gen6_rps_irq_handler(dev_priv, pm_iir); 216652894874SVille Syrjälä 21671ae3c34cSVille Syrjälä if (hotplug_status) 216891d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 21692ecb8ca4SVille Syrjälä 217091d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 21711e1cace9SVille Syrjälä } while (0); 21727e231dbeSJesse Barnes 21731f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 21741f814dacSImre Deak 21757e231dbeSJesse Barnes return ret; 21767e231dbeSJesse Barnes } 21777e231dbeSJesse Barnes 217843f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg) 217943f328d7SVille Syrjälä { 218045a83f84SDaniel Vetter struct drm_device *dev = arg; 2181fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 218243f328d7SVille Syrjälä irqreturn_t ret = IRQ_NONE; 218343f328d7SVille Syrjälä 21842dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21852dd2a883SImre Deak return IRQ_NONE; 21862dd2a883SImre Deak 21871f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21881f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21891f814dacSImre Deak 2190579de73bSChris Wilson do { 21916e814800SVille Syrjälä u32 master_ctl, iir; 21922ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21931ae3c34cSVille Syrjälä u32 hotplug_status = 0; 2194f0fd96f5SChris Wilson u32 gt_iir[4]; 2195a5e485a9SVille Syrjälä u32 ier = 0; 2196a5e485a9SVille Syrjälä 21978e5fd599SVille Syrjälä master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 21983278f67fSVille Syrjälä iir = I915_READ(VLV_IIR); 21993278f67fSVille Syrjälä 22003278f67fSVille Syrjälä if (master_ctl == 0 && iir == 0) 22018e5fd599SVille Syrjälä break; 220243f328d7SVille Syrjälä 220327b6c122SOscar Mateo ret = IRQ_HANDLED; 220427b6c122SOscar Mateo 2205a5e485a9SVille Syrjälä /* 2206a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2207a5e485a9SVille Syrjälä * 2208a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2209a5e485a9SVille Syrjälä * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2210a5e485a9SVille Syrjälä * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2211a5e485a9SVille Syrjälä * 2212a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2213a5e485a9SVille Syrjälä * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2214a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2215a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2216a5e485a9SVille Syrjälä * bits this time around. 
2217a5e485a9SVille Syrjälä */ 221843f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 2219a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2220a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 222143f328d7SVille Syrjälä 2222e30e251aSVille Syrjälä gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 222327b6c122SOscar Mateo 222427b6c122SOscar Mateo if (iir & I915_DISPLAY_PORT_INTERRUPT) 22251ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 222643f328d7SVille Syrjälä 222727b6c122SOscar Mateo /* Call regardless, as some status bits might not be 222827b6c122SOscar Mateo * signalled in iir */ 2229eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 223043f328d7SVille Syrjälä 2231eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2232eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT | 2233eef57324SJerome Anand I915_LPE_PIPE_C_INTERRUPT)) 2234eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2235eef57324SJerome Anand 22367ce4d1f2SVille Syrjälä /* 22377ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 22387ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 22397ce4d1f2SVille Syrjälä */ 22407ce4d1f2SVille Syrjälä if (iir) 22417ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 22427ce4d1f2SVille Syrjälä 2243a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 2244e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 22451ae3c34cSVille Syrjälä 2246f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2247e30e251aSVille Syrjälä 22481ae3c34cSVille Syrjälä if (hotplug_status) 224991d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 22502ecb8ca4SVille Syrjälä 225191d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2252579de73bSChris Wilson } while (0); 22533278f67fSVille Syrjälä 22541f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 22551f814dacSImre Deak 225643f328d7SVille Syrjälä return ret; 225743f328d7SVille Syrjälä } 225843f328d7SVille Syrjälä 225991d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 226091d14251STvrtko Ursulin u32 hotplug_trigger, 226140e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2262776ad806SJesse Barnes { 226342db67d6SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2264776ad806SJesse Barnes 22656a39d7c9SJani Nikula /* 22666a39d7c9SJani Nikula * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 22676a39d7c9SJani Nikula * unless we touch the hotplug register, even if hotplug_trigger is 22686a39d7c9SJani Nikula * zero. Not acking leads to "The master control interrupt lied (SDE)!" 22696a39d7c9SJani Nikula * errors. 
22706a39d7c9SJani Nikula */ 227113cf5504SDave Airlie dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 22726a39d7c9SJani Nikula if (!hotplug_trigger) { 22736a39d7c9SJani Nikula u32 mask = PORTA_HOTPLUG_STATUS_MASK | 22746a39d7c9SJani Nikula PORTD_HOTPLUG_STATUS_MASK | 22756a39d7c9SJani Nikula PORTC_HOTPLUG_STATUS_MASK | 22766a39d7c9SJani Nikula PORTB_HOTPLUG_STATUS_MASK; 22776a39d7c9SJani Nikula dig_hotplug_reg &= ~mask; 22786a39d7c9SJani Nikula } 22796a39d7c9SJani Nikula 228013cf5504SDave Airlie I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 22816a39d7c9SJani Nikula if (!hotplug_trigger) 22826a39d7c9SJani Nikula return; 228313cf5504SDave Airlie 2284cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 228540e56410SVille Syrjälä dig_hotplug_reg, hpd, 2286fd63e2a9SImre Deak pch_port_hotplug_long_detect); 228740e56410SVille Syrjälä 228891d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2289aaf5ec2eSSonika Jindal } 229091d131d2SDaniel Vetter 229191d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 229240e56410SVille Syrjälä { 229340e56410SVille Syrjälä int pipe; 229440e56410SVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 229540e56410SVille Syrjälä 229691d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 229740e56410SVille Syrjälä 2298cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 2299cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2300776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 2301cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2302cfc33bf7SVille Syrjälä port_name(port)); 2303cfc33bf7SVille Syrjälä } 2304776ad806SJesse Barnes 2305ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 230691d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2307ce99c256SDaniel Vetter 2308776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 230991d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2310776ad806SJesse Barnes 2311776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 2312776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2313776ad806SJesse Barnes 2314776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 2315776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2316776ad806SJesse Barnes 2317776ad806SJesse Barnes if (pch_iir & SDE_POISON) 2318776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 2319776ad806SJesse Barnes 23209db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 2321055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 23229db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 23239db4a9c7SJesse Barnes pipe_name(pipe), 23249db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 2325776ad806SJesse Barnes 2326776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2327776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2328776ad806SJesse Barnes 2329776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2330776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2331776ad806SJesse Barnes 2332776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2333a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 23348664281bSPaulo Zanoni 23358664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2336a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 
23378664281bSPaulo Zanoni } 23388664281bSPaulo Zanoni 233991d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 23408664281bSPaulo Zanoni { 23418664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 23425a69b89fSDaniel Vetter enum pipe pipe; 23438664281bSPaulo Zanoni 2344de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 2345de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2346de032bf4SPaulo Zanoni 2347055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 23481f7247c0SDaniel Vetter if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 23491f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 23508664281bSPaulo Zanoni 23515a69b89fSDaniel Vetter if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 235291d14251STvrtko Ursulin if (IS_IVYBRIDGE(dev_priv)) 235391d14251STvrtko Ursulin ivb_pipe_crc_irq_handler(dev_priv, pipe); 23545a69b89fSDaniel Vetter else 235591d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 23565a69b89fSDaniel Vetter } 23575a69b89fSDaniel Vetter } 23588bf1e9f1SShuang He 23598664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 23608664281bSPaulo Zanoni } 23618664281bSPaulo Zanoni 236291d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 23638664281bSPaulo Zanoni { 23648664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 236545c1cd87SMika Kahola enum pipe pipe; 23668664281bSPaulo Zanoni 2367de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 2368de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 2369de032bf4SPaulo Zanoni 237045c1cd87SMika Kahola for_each_pipe(dev_priv, pipe) 237145c1cd87SMika Kahola if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 237245c1cd87SMika Kahola intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 23738664281bSPaulo Zanoni 23748664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 2375776ad806SJesse Barnes } 2376776ad806SJesse Barnes 237791d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 237823e81d69SAdam Jackson { 237923e81d69SAdam Jackson int pipe; 23806dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2381aaf5ec2eSSonika Jindal 238291d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 238391d131d2SDaniel Vetter 2384cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2385cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 238623e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 2387cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2388cfc33bf7SVille Syrjälä port_name(port)); 2389cfc33bf7SVille Syrjälä } 239023e81d69SAdam Jackson 239123e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 239291d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 239323e81d69SAdam Jackson 239423e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 239591d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 239623e81d69SAdam Jackson 239723e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 239823e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 239923e81d69SAdam Jackson 240023e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 240123e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 240223e81d69SAdam Jackson 240323e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 2404055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 240523e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 240623e81d69SAdam Jackson pipe_name(pipe), 
240723e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 24088664281bSPaulo Zanoni 24098664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 241091d14251STvrtko Ursulin cpt_serr_int_handler(dev_priv); 241123e81d69SAdam Jackson } 241223e81d69SAdam Jackson 241331604222SAnusha Srivatsa static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 241431604222SAnusha Srivatsa { 241531604222SAnusha Srivatsa u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 241631604222SAnusha Srivatsa u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 241731604222SAnusha Srivatsa u32 pin_mask = 0, long_mask = 0; 241831604222SAnusha Srivatsa 241931604222SAnusha Srivatsa if (ddi_hotplug_trigger) { 242031604222SAnusha Srivatsa u32 dig_hotplug_reg; 242131604222SAnusha Srivatsa 242231604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 242331604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 242431604222SAnusha Srivatsa 242531604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 242631604222SAnusha Srivatsa ddi_hotplug_trigger, 242731604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 242831604222SAnusha Srivatsa icp_ddi_port_hotplug_long_detect); 242931604222SAnusha Srivatsa } 243031604222SAnusha Srivatsa 243131604222SAnusha Srivatsa if (tc_hotplug_trigger) { 243231604222SAnusha Srivatsa u32 dig_hotplug_reg; 243331604222SAnusha Srivatsa 243431604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 243531604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 243631604222SAnusha Srivatsa 243731604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 243831604222SAnusha Srivatsa tc_hotplug_trigger, 243931604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 244031604222SAnusha Srivatsa icp_tc_port_hotplug_long_detect); 244131604222SAnusha Srivatsa } 244231604222SAnusha Srivatsa 244331604222SAnusha Srivatsa if (pin_mask) 244431604222SAnusha Srivatsa intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 244531604222SAnusha Srivatsa 244631604222SAnusha Srivatsa if (pch_iir & SDE_GMBUS_ICP) 244731604222SAnusha Srivatsa gmbus_irq_handler(dev_priv); 244831604222SAnusha Srivatsa } 244931604222SAnusha Srivatsa 245091d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 24516dbf30ceSVille Syrjälä { 24526dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 24536dbf30ceSVille Syrjälä ~SDE_PORTE_HOTPLUG_SPT; 24546dbf30ceSVille Syrjälä u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 24556dbf30ceSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 24566dbf30ceSVille Syrjälä 24576dbf30ceSVille Syrjälä if (hotplug_trigger) { 24586dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24596dbf30ceSVille Syrjälä 24606dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 24616dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 24626dbf30ceSVille Syrjälä 2463cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2464cf53902fSRodrigo Vivi hotplug_trigger, dig_hotplug_reg, hpd_spt, 246574c0b395SVille Syrjälä spt_port_hotplug_long_detect); 24666dbf30ceSVille Syrjälä } 24676dbf30ceSVille Syrjälä 24686dbf30ceSVille Syrjälä if (hotplug2_trigger) { 24696dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24706dbf30ceSVille Syrjälä 24716dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 24726dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 24736dbf30ceSVille Syrjälä 2474cf53902fSRodrigo Vivi 
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2475cf53902fSRodrigo Vivi hotplug2_trigger, dig_hotplug_reg, hpd_spt, 24766dbf30ceSVille Syrjälä spt_port_hotplug2_long_detect); 24776dbf30ceSVille Syrjälä } 24786dbf30ceSVille Syrjälä 24796dbf30ceSVille Syrjälä if (pin_mask) 248091d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 24816dbf30ceSVille Syrjälä 24826dbf30ceSVille Syrjälä if (pch_iir & SDE_GMBUS_CPT) 248391d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 24846dbf30ceSVille Syrjälä } 24856dbf30ceSVille Syrjälä 248691d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 248791d14251STvrtko Ursulin u32 hotplug_trigger, 248840e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2489c008bc6eSPaulo Zanoni { 2490e4ce95aaSVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2491e4ce95aaSVille Syrjälä 2492e4ce95aaSVille Syrjälä dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2493e4ce95aaSVille Syrjälä I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2494e4ce95aaSVille Syrjälä 2495cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 249640e56410SVille Syrjälä dig_hotplug_reg, hpd, 2497e4ce95aaSVille Syrjälä ilk_port_hotplug_long_detect); 249840e56410SVille Syrjälä 249991d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2500e4ce95aaSVille Syrjälä } 2501c008bc6eSPaulo Zanoni 250291d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 250391d14251STvrtko Ursulin u32 de_iir) 250440e56410SVille Syrjälä { 250540e56410SVille Syrjälä enum pipe pipe; 250640e56410SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 250740e56410SVille Syrjälä 250840e56410SVille Syrjälä if (hotplug_trigger) 250991d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 251040e56410SVille Syrjälä 2511c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 251291d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2513c008bc6eSPaulo Zanoni 2514c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 251591d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2516c008bc6eSPaulo Zanoni 2517c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 2518c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2519c008bc6eSPaulo Zanoni 2520055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2521fd3a4024SDaniel Vetter if (de_iir & DE_PIPE_VBLANK(pipe)) 2522fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2523c008bc6eSPaulo Zanoni 252440da17c2SDaniel Vetter if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 25251f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2526c008bc6eSPaulo Zanoni 252740da17c2SDaniel Vetter if (de_iir & DE_PIPE_CRC_DONE(pipe)) 252891d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2529c008bc6eSPaulo Zanoni } 2530c008bc6eSPaulo Zanoni 2531c008bc6eSPaulo Zanoni /* check event from PCH */ 2532c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 2533c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 2534c008bc6eSPaulo Zanoni 253591d14251STvrtko Ursulin if (HAS_PCH_CPT(dev_priv)) 253691d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 2537c008bc6eSPaulo Zanoni else 253891d14251STvrtko Ursulin ibx_irq_handler(dev_priv, pch_iir); 2539c008bc6eSPaulo Zanoni 2540c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 2541c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 2542c008bc6eSPaulo Zanoni } 2543c008bc6eSPaulo Zanoni 
2544cf819effSLucas De Marchi if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 254591d14251STvrtko Ursulin ironlake_rps_change_irq_handler(dev_priv); 2546c008bc6eSPaulo Zanoni } 2547c008bc6eSPaulo Zanoni 254891d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 254991d14251STvrtko Ursulin u32 de_iir) 25509719fb98SPaulo Zanoni { 255107d27e20SDamien Lespiau enum pipe pipe; 255223bb4cb5SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 255323bb4cb5SVille Syrjälä 255440e56410SVille Syrjälä if (hotplug_trigger) 255591d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 25569719fb98SPaulo Zanoni 25579719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 255891d14251STvrtko Ursulin ivb_err_int_handler(dev_priv); 25599719fb98SPaulo Zanoni 256054fd3149SDhinakaran Pandiyan if (de_iir & DE_EDP_PSR_INT_HSW) { 256154fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 256254fd3149SDhinakaran Pandiyan 256354fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 256454fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 256554fd3149SDhinakaran Pandiyan } 2566fc340442SDaniel Vetter 25679719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 256891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 25699719fb98SPaulo Zanoni 25709719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 257191d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 25729719fb98SPaulo Zanoni 2573055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2574fd3a4024SDaniel Vetter if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2575fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 25769719fb98SPaulo Zanoni } 25779719fb98SPaulo Zanoni 25789719fb98SPaulo Zanoni /* check event from PCH */ 257991d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 25809719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 25819719fb98SPaulo Zanoni 258291d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 25839719fb98SPaulo Zanoni 25849719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 25859719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 25869719fb98SPaulo Zanoni } 25879719fb98SPaulo Zanoni } 25889719fb98SPaulo Zanoni 258972c90f62SOscar Mateo /* 259072c90f62SOscar Mateo * To handle irqs with the minimum potential races with fresh interrupts, we: 259172c90f62SOscar Mateo * 1 - Disable Master Interrupt Control. 259272c90f62SOscar Mateo * 2 - Find the source(s) of the interrupt. 259372c90f62SOscar Mateo * 3 - Clear the Interrupt Identity bits (IIR). 259472c90f62SOscar Mateo * 4 - Process the interrupt(s) that had bits set in the IIRs. 259572c90f62SOscar Mateo * 5 - Re-enable Master Interrupt Control. 
259672c90f62SOscar Mateo */ 2597f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2598b1f14ad0SJesse Barnes { 259945a83f84SDaniel Vetter struct drm_device *dev = arg; 2600fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2601f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 26020e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 2603b1f14ad0SJesse Barnes 26042dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 26052dd2a883SImre Deak return IRQ_NONE; 26062dd2a883SImre Deak 26071f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26081f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 26091f814dacSImre Deak 2610b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 2611b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 2612b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 26130e43406bSChris Wilson 261444498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 261544498aeaSPaulo Zanoni * interrupts will will be stored on its back queue, and then we'll be 261644498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 261744498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 261844498aeaSPaulo Zanoni * due to its back queue). */ 261991d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv)) { 262044498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 262144498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 2622ab5c608bSBen Widawsky } 262344498aeaSPaulo Zanoni 262472c90f62SOscar Mateo /* Find, clear, then process each source of interrupt */ 262572c90f62SOscar Mateo 26260e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 26270e43406bSChris Wilson if (gt_iir) { 262872c90f62SOscar Mateo I915_WRITE(GTIIR, gt_iir); 262972c90f62SOscar Mateo ret = IRQ_HANDLED; 263091d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 2631261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 2632d8fc8a47SPaulo Zanoni else 2633261e40b8SVille Syrjälä ilk_gt_irq_handler(dev_priv, gt_iir); 26340e43406bSChris Wilson } 2635b1f14ad0SJesse Barnes 2636b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 26370e43406bSChris Wilson if (de_iir) { 263872c90f62SOscar Mateo I915_WRITE(DEIIR, de_iir); 263972c90f62SOscar Mateo ret = IRQ_HANDLED; 264091d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) 264191d14251STvrtko Ursulin ivb_display_irq_handler(dev_priv, de_iir); 2642f1af8fc1SPaulo Zanoni else 264391d14251STvrtko Ursulin ilk_display_irq_handler(dev_priv, de_iir); 26440e43406bSChris Wilson } 26450e43406bSChris Wilson 264691d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 2647f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 26480e43406bSChris Wilson if (pm_iir) { 2649b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 26500e43406bSChris Wilson ret = IRQ_HANDLED; 265172c90f62SOscar Mateo gen6_rps_irq_handler(dev_priv, pm_iir); 26520e43406bSChris Wilson } 2653f1af8fc1SPaulo Zanoni } 2654b1f14ad0SJesse Barnes 2655b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 265674093f3eSChris Wilson if (!HAS_PCH_NOP(dev_priv)) 265744498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 2658b1f14ad0SJesse Barnes 26591f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26601f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 26611f814dacSImre Deak 2662b1f14ad0SJesse Barnes return ret; 2663b1f14ad0SJesse Barnes } 2664b1f14ad0SJesse Barnes 266591d14251STvrtko 
Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 266691d14251STvrtko Ursulin u32 hotplug_trigger, 266740e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2668d04a492dSShashank Sharma { 2669cebd87a0SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2670d04a492dSShashank Sharma 2671a52bb15bSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2672a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2673d04a492dSShashank Sharma 2674cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 267540e56410SVille Syrjälä dig_hotplug_reg, hpd, 2676cebd87a0SVille Syrjälä bxt_port_hotplug_long_detect); 267740e56410SVille Syrjälä 267891d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2679d04a492dSShashank Sharma } 2680d04a492dSShashank Sharma 2681121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2682121e758eSDhinakaran Pandiyan { 2683121e758eSDhinakaran Pandiyan u32 pin_mask = 0, long_mask = 0; 2684b796b971SDhinakaran Pandiyan u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2685b796b971SDhinakaran Pandiyan u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2686121e758eSDhinakaran Pandiyan 2687121e758eSDhinakaran Pandiyan if (trigger_tc) { 2688b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2689b796b971SDhinakaran Pandiyan 2690121e758eSDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2691121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2692121e758eSDhinakaran Pandiyan 2693121e758eSDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2694b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2695121e758eSDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2696121e758eSDhinakaran Pandiyan } 2697b796b971SDhinakaran Pandiyan 2698b796b971SDhinakaran Pandiyan if (trigger_tbt) { 2699b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2700b796b971SDhinakaran Pandiyan 2701b796b971SDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2702b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2703b796b971SDhinakaran Pandiyan 2704b796b971SDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2705b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2706b796b971SDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2707b796b971SDhinakaran Pandiyan } 2708b796b971SDhinakaran Pandiyan 2709b796b971SDhinakaran Pandiyan if (pin_mask) 2710b796b971SDhinakaran Pandiyan intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2711b796b971SDhinakaran Pandiyan else 2712b796b971SDhinakaran Pandiyan DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2713121e758eSDhinakaran Pandiyan } 2714121e758eSDhinakaran Pandiyan 2715f11a0f46STvrtko Ursulin static irqreturn_t 2716f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2717abd58f01SBen Widawsky { 2718abd58f01SBen Widawsky irqreturn_t ret = IRQ_NONE; 2719f11a0f46STvrtko Ursulin u32 iir; 2720c42664ccSDaniel Vetter enum pipe pipe; 272188e04703SJesse Barnes 2722abd58f01SBen Widawsky if (master_ctl & GEN8_DE_MISC_IRQ) { 2723e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_MISC_IIR); 2724e32192e1STvrtko Ursulin if (iir) { 2725e04f7eceSVille Syrjälä bool found = false; 2726e04f7eceSVille Syrjälä 2727e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_MISC_IIR, iir); 2728abd58f01SBen Widawsky ret = IRQ_HANDLED; 
2729e04f7eceSVille Syrjälä 2730e04f7eceSVille Syrjälä if (iir & GEN8_DE_MISC_GSE) { 273191d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2732e04f7eceSVille Syrjälä found = true; 2733e04f7eceSVille Syrjälä } 2734e04f7eceSVille Syrjälä 2735e04f7eceSVille Syrjälä if (iir & GEN8_DE_EDP_PSR) { 273654fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 273754fd3149SDhinakaran Pandiyan 273854fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 273954fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 2740e04f7eceSVille Syrjälä found = true; 2741e04f7eceSVille Syrjälä } 2742e04f7eceSVille Syrjälä 2743e04f7eceSVille Syrjälä if (!found) 274438cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Misc interrupt\n"); 2745abd58f01SBen Widawsky } 274638cc46d7SOscar Mateo else 274738cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2748abd58f01SBen Widawsky } 2749abd58f01SBen Widawsky 2750121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2751121e758eSDhinakaran Pandiyan iir = I915_READ(GEN11_DE_HPD_IIR); 2752121e758eSDhinakaran Pandiyan if (iir) { 2753121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IIR, iir); 2754121e758eSDhinakaran Pandiyan ret = IRQ_HANDLED; 2755121e758eSDhinakaran Pandiyan gen11_hpd_irq_handler(dev_priv, iir); 2756121e758eSDhinakaran Pandiyan } else { 2757121e758eSDhinakaran Pandiyan DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2758121e758eSDhinakaran Pandiyan } 2759121e758eSDhinakaran Pandiyan } 2760121e758eSDhinakaran Pandiyan 27616d766f02SDaniel Vetter if (master_ctl & GEN8_DE_PORT_IRQ) { 2762e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PORT_IIR); 2763e32192e1STvrtko Ursulin if (iir) { 2764e32192e1STvrtko Ursulin u32 tmp_mask; 2765d04a492dSShashank Sharma bool found = false; 2766cebd87a0SVille Syrjälä 2767e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PORT_IIR, iir); 27686d766f02SDaniel Vetter ret = IRQ_HANDLED; 276988e04703SJesse Barnes 2770e32192e1STvrtko Ursulin tmp_mask = GEN8_AUX_CHANNEL_A; 2771bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2772e32192e1STvrtko Ursulin tmp_mask |= GEN9_AUX_CHANNEL_B | 2773e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_C | 2774e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_D; 2775e32192e1STvrtko Ursulin 2776bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 2777bb187e93SJames Ausmus tmp_mask |= ICL_AUX_CHANNEL_E; 2778bb187e93SJames Ausmus 27799bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || 27809bb635d9SDhinakaran Pandiyan INTEL_GEN(dev_priv) >= 11) 2781a324fcacSRodrigo Vivi tmp_mask |= CNL_AUX_CHANNEL_F; 2782a324fcacSRodrigo Vivi 2783e32192e1STvrtko Ursulin if (iir & tmp_mask) { 278491d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2785d04a492dSShashank Sharma found = true; 2786d04a492dSShashank Sharma } 2787d04a492dSShashank Sharma 2788cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) { 2789e32192e1STvrtko Ursulin tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2790e32192e1STvrtko Ursulin if (tmp_mask) { 279191d14251STvrtko Ursulin bxt_hpd_irq_handler(dev_priv, tmp_mask, 279291d14251STvrtko Ursulin hpd_bxt); 2793d04a492dSShashank Sharma found = true; 2794d04a492dSShashank Sharma } 2795e32192e1STvrtko Ursulin } else if (IS_BROADWELL(dev_priv)) { 2796e32192e1STvrtko Ursulin tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2797e32192e1STvrtko Ursulin if (tmp_mask) { 279891d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, 279991d14251STvrtko Ursulin tmp_mask, hpd_bdw); 2800e32192e1STvrtko 
Ursulin found = true; 2801e32192e1STvrtko Ursulin } 2802e32192e1STvrtko Ursulin } 2803d04a492dSShashank Sharma 2804cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 280591d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 28069e63743eSShashank Sharma found = true; 28079e63743eSShashank Sharma } 28089e63743eSShashank Sharma 2809d04a492dSShashank Sharma if (!found) 281038cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Port interrupt\n"); 28116d766f02SDaniel Vetter } 281238cc46d7SOscar Mateo else 281338cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 28146d766f02SDaniel Vetter } 28156d766f02SDaniel Vetter 2816055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2817fd3a4024SDaniel Vetter u32 fault_errors; 2818abd58f01SBen Widawsky 2819c42664ccSDaniel Vetter if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2820c42664ccSDaniel Vetter continue; 2821c42664ccSDaniel Vetter 2822e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2823e32192e1STvrtko Ursulin if (!iir) { 2824e32192e1STvrtko Ursulin DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2825e32192e1STvrtko Ursulin continue; 2826e32192e1STvrtko Ursulin } 2827770de83dSDamien Lespiau 2828e32192e1STvrtko Ursulin ret = IRQ_HANDLED; 2829e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2830e32192e1STvrtko Ursulin 2831fd3a4024SDaniel Vetter if (iir & GEN8_PIPE_VBLANK) 2832fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2833abd58f01SBen Widawsky 2834e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 283591d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 28360fbe7870SDaniel Vetter 2837e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2838e32192e1STvrtko Ursulin intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 283938d83c96SDaniel Vetter 2840e32192e1STvrtko Ursulin fault_errors = iir; 2841bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2842e32192e1STvrtko Ursulin fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2843770de83dSDamien Lespiau else 2844e32192e1STvrtko Ursulin fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2845770de83dSDamien Lespiau 2846770de83dSDamien Lespiau if (fault_errors) 28471353ec38STvrtko Ursulin DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 284830100f2bSDaniel Vetter pipe_name(pipe), 2849e32192e1STvrtko Ursulin fault_errors); 2850abd58f01SBen Widawsky } 2851abd58f01SBen Widawsky 285291d14251STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2853266ea3d9SShashank Sharma master_ctl & GEN8_DE_PCH_IRQ) { 285492d03a80SDaniel Vetter /* 285592d03a80SDaniel Vetter * FIXME(BDW): Assume for now that the new interrupt handling 285692d03a80SDaniel Vetter * scheme also closed the SDE interrupt handling race we've seen 285792d03a80SDaniel Vetter * on older pch-split platforms. But this needs testing. 
285892d03a80SDaniel Vetter */ 2859e32192e1STvrtko Ursulin iir = I915_READ(SDEIIR); 2860e32192e1STvrtko Ursulin if (iir) { 2861e32192e1STvrtko Ursulin I915_WRITE(SDEIIR, iir); 286292d03a80SDaniel Vetter ret = IRQ_HANDLED; 28636dbf30ceSVille Syrjälä 286431604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 286531604222SAnusha Srivatsa icp_irq_handler(dev_priv, iir); 286631604222SAnusha Srivatsa else if (HAS_PCH_SPT(dev_priv) || 286731604222SAnusha Srivatsa HAS_PCH_KBP(dev_priv) || 28687b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 286991d14251STvrtko Ursulin spt_irq_handler(dev_priv, iir); 28706dbf30ceSVille Syrjälä else 287191d14251STvrtko Ursulin cpt_irq_handler(dev_priv, iir); 28722dfb0b81SJani Nikula } else { 28732dfb0b81SJani Nikula /* 28742dfb0b81SJani Nikula * Like on previous PCH there seems to be something 28752dfb0b81SJani Nikula * fishy going on with forwarding PCH interrupts. 28762dfb0b81SJani Nikula */ 28772dfb0b81SJani Nikula DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 28782dfb0b81SJani Nikula } 287992d03a80SDaniel Vetter } 288092d03a80SDaniel Vetter 2881f11a0f46STvrtko Ursulin return ret; 2882f11a0f46STvrtko Ursulin } 2883f11a0f46STvrtko Ursulin 28844376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs) 28854376b9c9SMika Kuoppala { 28864376b9c9SMika Kuoppala raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 28874376b9c9SMika Kuoppala 28884376b9c9SMika Kuoppala /* 28894376b9c9SMika Kuoppala * Now with master disabled, get a sample of level indications 28904376b9c9SMika Kuoppala * for this interrupt. Indications will be cleared on related acks. 28914376b9c9SMika Kuoppala * New indications can and will light up during processing, 28924376b9c9SMika Kuoppala * and will generate new interrupt after enabling master. 
28934376b9c9SMika Kuoppala */ 28944376b9c9SMika Kuoppala return raw_reg_read(regs, GEN8_MASTER_IRQ); 28954376b9c9SMika Kuoppala } 28964376b9c9SMika Kuoppala 28974376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs) 28984376b9c9SMika Kuoppala { 28994376b9c9SMika Kuoppala raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 29004376b9c9SMika Kuoppala } 29014376b9c9SMika Kuoppala 2902f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg) 2903f11a0f46STvrtko Ursulin { 2904f0fd96f5SChris Wilson struct drm_i915_private *dev_priv = to_i915(arg); 29054376b9c9SMika Kuoppala void __iomem * const regs = dev_priv->regs; 2906f11a0f46STvrtko Ursulin u32 master_ctl; 2907f0fd96f5SChris Wilson u32 gt_iir[4]; 2908f11a0f46STvrtko Ursulin 2909f11a0f46STvrtko Ursulin if (!intel_irqs_enabled(dev_priv)) 2910f11a0f46STvrtko Ursulin return IRQ_NONE; 2911f11a0f46STvrtko Ursulin 29124376b9c9SMika Kuoppala master_ctl = gen8_master_intr_disable(regs); 29134376b9c9SMika Kuoppala if (!master_ctl) { 29144376b9c9SMika Kuoppala gen8_master_intr_enable(regs); 2915f11a0f46STvrtko Ursulin return IRQ_NONE; 29164376b9c9SMika Kuoppala } 2917f11a0f46STvrtko Ursulin 2918f11a0f46STvrtko Ursulin /* Find, clear, then process each source of interrupt */ 291955ef72f2SChris Wilson gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2920f0fd96f5SChris Wilson 2921f0fd96f5SChris Wilson /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2922f0fd96f5SChris Wilson if (master_ctl & ~GEN8_GT_IRQS) { 2923f0fd96f5SChris Wilson disable_rpm_wakeref_asserts(dev_priv); 292455ef72f2SChris Wilson gen8_de_irq_handler(dev_priv, master_ctl); 2925f0fd96f5SChris Wilson enable_rpm_wakeref_asserts(dev_priv); 2926f0fd96f5SChris Wilson } 2927f11a0f46STvrtko Ursulin 29284376b9c9SMika Kuoppala gen8_master_intr_enable(regs); 2929abd58f01SBen Widawsky 2930f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 29311f814dacSImre Deak 293255ef72f2SChris Wilson return IRQ_HANDLED; 2933abd58f01SBen Widawsky } 2934abd58f01SBen Widawsky 293536703e79SChris Wilson struct wedge_me { 293636703e79SChris Wilson struct delayed_work work; 293736703e79SChris Wilson struct drm_i915_private *i915; 293836703e79SChris Wilson const char *name; 293936703e79SChris Wilson }; 294036703e79SChris Wilson 294136703e79SChris Wilson static void wedge_me(struct work_struct *work) 294236703e79SChris Wilson { 294336703e79SChris Wilson struct wedge_me *w = container_of(work, typeof(*w), work.work); 294436703e79SChris Wilson 294536703e79SChris Wilson dev_err(w->i915->drm.dev, 294636703e79SChris Wilson "%s timed out, cancelling all in-flight rendering.\n", 294736703e79SChris Wilson w->name); 294836703e79SChris Wilson i915_gem_set_wedged(w->i915); 294936703e79SChris Wilson } 295036703e79SChris Wilson 295136703e79SChris Wilson static void __init_wedge(struct wedge_me *w, 295236703e79SChris Wilson struct drm_i915_private *i915, 295336703e79SChris Wilson long timeout, 295436703e79SChris Wilson const char *name) 295536703e79SChris Wilson { 295636703e79SChris Wilson w->i915 = i915; 295736703e79SChris Wilson w->name = name; 295836703e79SChris Wilson 295936703e79SChris Wilson INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 296036703e79SChris Wilson schedule_delayed_work(&w->work, timeout); 296136703e79SChris Wilson } 296236703e79SChris Wilson 296336703e79SChris Wilson static void __fini_wedge(struct wedge_me *w) 296436703e79SChris Wilson { 296536703e79SChris Wilson cancel_delayed_work_sync(&w->work); 
296636703e79SChris Wilson destroy_delayed_work_on_stack(&w->work); 296736703e79SChris Wilson w->i915 = NULL; 296836703e79SChris Wilson } 296936703e79SChris Wilson 297036703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 297136703e79SChris Wilson for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 297236703e79SChris Wilson (W)->i915; \ 297336703e79SChris Wilson __fini_wedge((W))) 297436703e79SChris Wilson 297551951ae7SMika Kuoppala static u32 2976f744dbc2SMika Kuoppala gen11_gt_engine_identity(struct drm_i915_private * const i915, 297751951ae7SMika Kuoppala const unsigned int bank, const unsigned int bit) 297851951ae7SMika Kuoppala { 297951951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 298051951ae7SMika Kuoppala u32 timeout_ts; 298151951ae7SMika Kuoppala u32 ident; 298251951ae7SMika Kuoppala 298396606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 298496606f3bSOscar Mateo 298551951ae7SMika Kuoppala raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 298651951ae7SMika Kuoppala 298751951ae7SMika Kuoppala /* 298851951ae7SMika Kuoppala * NB: Specs do not specify how long to spin wait, 298951951ae7SMika Kuoppala * so we do ~100us as an educated guess. 299051951ae7SMika Kuoppala */ 299151951ae7SMika Kuoppala timeout_ts = (local_clock() >> 10) + 100; 299251951ae7SMika Kuoppala do { 299351951ae7SMika Kuoppala ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 299451951ae7SMika Kuoppala } while (!(ident & GEN11_INTR_DATA_VALID) && 299551951ae7SMika Kuoppala !time_after32(local_clock() >> 10, timeout_ts)); 299651951ae7SMika Kuoppala 299751951ae7SMika Kuoppala if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 299851951ae7SMika Kuoppala DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 299951951ae7SMika Kuoppala bank, bit, ident); 300051951ae7SMika Kuoppala return 0; 300151951ae7SMika Kuoppala } 300251951ae7SMika Kuoppala 300351951ae7SMika Kuoppala raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 300451951ae7SMika Kuoppala GEN11_INTR_DATA_VALID); 300551951ae7SMika Kuoppala 3006f744dbc2SMika Kuoppala return ident; 3007f744dbc2SMika Kuoppala } 3008f744dbc2SMika Kuoppala 3009f744dbc2SMika Kuoppala static void 3010f744dbc2SMika Kuoppala gen11_other_irq_handler(struct drm_i915_private * const i915, 3011f744dbc2SMika Kuoppala const u8 instance, const u16 iir) 3012f744dbc2SMika Kuoppala { 3013d02b98b8SOscar Mateo if (instance == OTHER_GTPM_INSTANCE) 3014d02b98b8SOscar Mateo return gen6_rps_irq_handler(i915, iir); 3015d02b98b8SOscar Mateo 3016f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3017f744dbc2SMika Kuoppala instance, iir); 3018f744dbc2SMika Kuoppala } 3019f744dbc2SMika Kuoppala 3020f744dbc2SMika Kuoppala static void 3021f744dbc2SMika Kuoppala gen11_engine_irq_handler(struct drm_i915_private * const i915, 3022f744dbc2SMika Kuoppala const u8 class, const u8 instance, const u16 iir) 3023f744dbc2SMika Kuoppala { 3024f744dbc2SMika Kuoppala struct intel_engine_cs *engine; 3025f744dbc2SMika Kuoppala 3026f744dbc2SMika Kuoppala if (instance <= MAX_ENGINE_INSTANCE) 3027f744dbc2SMika Kuoppala engine = i915->engine_class[class][instance]; 3028f744dbc2SMika Kuoppala else 3029f744dbc2SMika Kuoppala engine = NULL; 3030f744dbc2SMika Kuoppala 3031f744dbc2SMika Kuoppala if (likely(engine)) 3032f744dbc2SMika Kuoppala return gen8_cs_irq_handler(engine, iir); 3033f744dbc2SMika Kuoppala 3034f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3035f744dbc2SMika Kuoppala class, 
instance); 3036f744dbc2SMika Kuoppala } 3037f744dbc2SMika Kuoppala 3038f744dbc2SMika Kuoppala static void 3039f744dbc2SMika Kuoppala gen11_gt_identity_handler(struct drm_i915_private * const i915, 3040f744dbc2SMika Kuoppala const u32 identity) 3041f744dbc2SMika Kuoppala { 3042f744dbc2SMika Kuoppala const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3043f744dbc2SMika Kuoppala const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3044f744dbc2SMika Kuoppala const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3045f744dbc2SMika Kuoppala 3046f744dbc2SMika Kuoppala if (unlikely(!intr)) 3047f744dbc2SMika Kuoppala return; 3048f744dbc2SMika Kuoppala 3049f744dbc2SMika Kuoppala if (class <= COPY_ENGINE_CLASS) 3050f744dbc2SMika Kuoppala return gen11_engine_irq_handler(i915, class, instance, intr); 3051f744dbc2SMika Kuoppala 3052f744dbc2SMika Kuoppala if (class == OTHER_CLASS) 3053f744dbc2SMika Kuoppala return gen11_other_irq_handler(i915, instance, intr); 3054f744dbc2SMika Kuoppala 3055f744dbc2SMika Kuoppala WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3056f744dbc2SMika Kuoppala class, instance, intr); 305751951ae7SMika Kuoppala } 305851951ae7SMika Kuoppala 305951951ae7SMika Kuoppala static void 306096606f3bSOscar Mateo gen11_gt_bank_handler(struct drm_i915_private * const i915, 306196606f3bSOscar Mateo const unsigned int bank) 306251951ae7SMika Kuoppala { 306351951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 306451951ae7SMika Kuoppala unsigned long intr_dw; 306551951ae7SMika Kuoppala unsigned int bit; 306651951ae7SMika Kuoppala 306796606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 306851951ae7SMika Kuoppala 306951951ae7SMika Kuoppala intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 307051951ae7SMika Kuoppala 307151951ae7SMika Kuoppala if (unlikely(!intr_dw)) { 307251951ae7SMika Kuoppala DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 307396606f3bSOscar Mateo return; 307451951ae7SMika Kuoppala } 307551951ae7SMika Kuoppala 307651951ae7SMika Kuoppala for_each_set_bit(bit, &intr_dw, 32) { 3077f744dbc2SMika Kuoppala const u32 ident = gen11_gt_engine_identity(i915, 3078f744dbc2SMika Kuoppala bank, bit); 307951951ae7SMika Kuoppala 3080f744dbc2SMika Kuoppala gen11_gt_identity_handler(i915, ident); 308151951ae7SMika Kuoppala } 308251951ae7SMika Kuoppala 308351951ae7SMika Kuoppala /* Clear must be after shared has been served for engine */ 308451951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 308551951ae7SMika Kuoppala } 308696606f3bSOscar Mateo 308796606f3bSOscar Mateo static void 308896606f3bSOscar Mateo gen11_gt_irq_handler(struct drm_i915_private * const i915, 308996606f3bSOscar Mateo const u32 master_ctl) 309096606f3bSOscar Mateo { 309196606f3bSOscar Mateo unsigned int bank; 309296606f3bSOscar Mateo 309396606f3bSOscar Mateo spin_lock(&i915->irq_lock); 309496606f3bSOscar Mateo 309596606f3bSOscar Mateo for (bank = 0; bank < 2; bank++) { 309696606f3bSOscar Mateo if (master_ctl & GEN11_GT_DW_IRQ(bank)) 309796606f3bSOscar Mateo gen11_gt_bank_handler(i915, bank); 309896606f3bSOscar Mateo } 309996606f3bSOscar Mateo 310096606f3bSOscar Mateo spin_unlock(&i915->irq_lock); 310151951ae7SMika Kuoppala } 310251951ae7SMika Kuoppala 31037a909383SChris Wilson static u32 31047a909383SChris Wilson gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3105df0d28c1SDhinakaran Pandiyan { 3106df0d28c1SDhinakaran Pandiyan void __iomem * const regs = dev_priv->regs; 31077a909383SChris Wilson u32 iir; 3108df0d28c1SDhinakaran 
Pandiyan 3109df0d28c1SDhinakaran Pandiyan if (!(master_ctl & GEN11_GU_MISC_IRQ)) 31107a909383SChris Wilson return 0; 3111df0d28c1SDhinakaran Pandiyan 31127a909383SChris Wilson iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 31137a909383SChris Wilson if (likely(iir)) 31147a909383SChris Wilson raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 31157a909383SChris Wilson 31167a909383SChris Wilson return iir; 3117df0d28c1SDhinakaran Pandiyan } 3118df0d28c1SDhinakaran Pandiyan 3119df0d28c1SDhinakaran Pandiyan static void 31207a909383SChris Wilson gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3121df0d28c1SDhinakaran Pandiyan { 3122df0d28c1SDhinakaran Pandiyan if (iir & GEN11_GU_MISC_GSE) 3123df0d28c1SDhinakaran Pandiyan intel_opregion_asle_intr(dev_priv); 3124df0d28c1SDhinakaran Pandiyan } 3125df0d28c1SDhinakaran Pandiyan 312681067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs) 312781067b71SMika Kuoppala { 312881067b71SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 312981067b71SMika Kuoppala 313081067b71SMika Kuoppala /* 313181067b71SMika Kuoppala * Now with master disabled, get a sample of level indications 313281067b71SMika Kuoppala * for this interrupt. Indications will be cleared on related acks. 313381067b71SMika Kuoppala * New indications can and will light up during processing, 313481067b71SMika Kuoppala * and will generate new interrupt after enabling master. 313581067b71SMika Kuoppala */ 313681067b71SMika Kuoppala return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 313781067b71SMika Kuoppala } 313881067b71SMika Kuoppala 313981067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs) 314081067b71SMika Kuoppala { 314181067b71SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 314281067b71SMika Kuoppala } 314381067b71SMika Kuoppala 314451951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg) 314551951ae7SMika Kuoppala { 314651951ae7SMika Kuoppala struct drm_i915_private * const i915 = to_i915(arg); 314751951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 314851951ae7SMika Kuoppala u32 master_ctl; 3149df0d28c1SDhinakaran Pandiyan u32 gu_misc_iir; 315051951ae7SMika Kuoppala 315151951ae7SMika Kuoppala if (!intel_irqs_enabled(i915)) 315251951ae7SMika Kuoppala return IRQ_NONE; 315351951ae7SMika Kuoppala 315481067b71SMika Kuoppala master_ctl = gen11_master_intr_disable(regs); 315581067b71SMika Kuoppala if (!master_ctl) { 315681067b71SMika Kuoppala gen11_master_intr_enable(regs); 315751951ae7SMika Kuoppala return IRQ_NONE; 315881067b71SMika Kuoppala } 315951951ae7SMika Kuoppala 316051951ae7SMika Kuoppala /* Find, clear, then process each source of interrupt. */ 316151951ae7SMika Kuoppala gen11_gt_irq_handler(i915, master_ctl); 316251951ae7SMika Kuoppala 316351951ae7SMika Kuoppala /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 316451951ae7SMika Kuoppala if (master_ctl & GEN11_DISPLAY_IRQ) { 316551951ae7SMika Kuoppala const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 316651951ae7SMika Kuoppala 316751951ae7SMika Kuoppala disable_rpm_wakeref_asserts(i915); 316851951ae7SMika Kuoppala /* 316951951ae7SMika Kuoppala * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 317051951ae7SMika Kuoppala * for the display related bits. 
317151951ae7SMika Kuoppala */ 317251951ae7SMika Kuoppala gen8_de_irq_handler(i915, disp_ctl); 317351951ae7SMika Kuoppala enable_rpm_wakeref_asserts(i915); 317451951ae7SMika Kuoppala } 317551951ae7SMika Kuoppala 31767a909383SChris Wilson gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3177df0d28c1SDhinakaran Pandiyan 317881067b71SMika Kuoppala gen11_master_intr_enable(regs); 317951951ae7SMika Kuoppala 31807a909383SChris Wilson gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3181df0d28c1SDhinakaran Pandiyan 318251951ae7SMika Kuoppala return IRQ_HANDLED; 318351951ae7SMika Kuoppala } 318451951ae7SMika Kuoppala 3185ce800754SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv, 3186d0667e9cSChris Wilson u32 engine_mask, 3187d0667e9cSChris Wilson const char *reason) 31888a905236SJesse Barnes { 3189ce800754SChris Wilson struct i915_gpu_error *error = &dev_priv->gpu_error; 319091c8a326SChris Wilson struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3191cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3192cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3193cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 319436703e79SChris Wilson struct wedge_me w; 31958a905236SJesse Barnes 3196c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 31978a905236SJesse Barnes 319844d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 3199c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 32001f83fee0SDaniel Vetter 320136703e79SChris Wilson /* Use a watchdog to ensure that our reset completes */ 320236703e79SChris Wilson i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3203c033666aSChris Wilson intel_prepare_reset(dev_priv); 32047514747dSVille Syrjälä 3205d0667e9cSChris Wilson error->reason = reason; 3206d0667e9cSChris Wilson error->stalled_mask = engine_mask; 3207ce800754SChris Wilson 320836703e79SChris Wilson /* Signal that locked waiters should reset the GPU */ 3209d0667e9cSChris Wilson smp_mb__before_atomic(); 3210ce800754SChris Wilson set_bit(I915_RESET_HANDOFF, &error->flags); 3211ce800754SChris Wilson wake_up_all(&error->wait_queue); 32128c185ecaSChris Wilson 321336703e79SChris Wilson /* Wait for anyone holding the lock to wakeup, without 321436703e79SChris Wilson * blocking indefinitely on struct_mutex. 
321517e1df07SDaniel Vetter */ 321636703e79SChris Wilson do { 3217780f262aSChris Wilson if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3218d0667e9cSChris Wilson i915_reset(dev_priv, engine_mask, reason); 3219221fe799SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 3220780f262aSChris Wilson } 3221ce800754SChris Wilson } while (wait_on_bit_timeout(&error->flags, 32228c185ecaSChris Wilson I915_RESET_HANDOFF, 3223780f262aSChris Wilson TASK_UNINTERRUPTIBLE, 322436703e79SChris Wilson 1)); 3225f69061beSDaniel Vetter 3226d0667e9cSChris Wilson error->stalled_mask = 0; 3227ce800754SChris Wilson error->reason = NULL; 3228ce800754SChris Wilson 3229c033666aSChris Wilson intel_finish_reset(dev_priv); 323036703e79SChris Wilson } 3231f454c694SImre Deak 3232ce800754SChris Wilson if (!test_bit(I915_WEDGED, &error->flags)) 3233ce800754SChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3234f316a42cSBen Gamari } 32358a905236SJesse Barnes 323609605548SLionel Landwerlin void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3237c0e09200SDave Airlie { 3238eaa14c24SChris Wilson u32 eir; 323963eeaf38SJesse Barnes 3240cf819effSLucas De Marchi if (!IS_GEN(dev_priv, 2)) 3241eaa14c24SChris Wilson I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 324263eeaf38SJesse Barnes 3243eaa14c24SChris Wilson if (INTEL_GEN(dev_priv) < 4) 3244eaa14c24SChris Wilson I915_WRITE(IPEIR, I915_READ(IPEIR)); 3245eaa14c24SChris Wilson else 3246eaa14c24SChris Wilson I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 32478a905236SJesse Barnes 3248eaa14c24SChris Wilson I915_WRITE(EIR, I915_READ(EIR)); 324963eeaf38SJesse Barnes eir = I915_READ(EIR); 325063eeaf38SJesse Barnes if (eir) { 325163eeaf38SJesse Barnes /* 325263eeaf38SJesse Barnes * some errors might have become stuck, 325363eeaf38SJesse Barnes * mask them. 
325463eeaf38SJesse Barnes */ 3255eaa14c24SChris Wilson DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 325663eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 325778c357ddSVille Syrjälä I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 325863eeaf38SJesse Barnes } 325909605548SLionel Landwerlin 326009605548SLionel Landwerlin if (INTEL_GEN(dev_priv) >= 8) { 326109605548SLionel Landwerlin I915_WRITE(GEN8_RING_FAULT_REG, 326209605548SLionel Landwerlin I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); 326309605548SLionel Landwerlin POSTING_READ(GEN8_RING_FAULT_REG); 326409605548SLionel Landwerlin } else if (INTEL_GEN(dev_priv) >= 6) { 326509605548SLionel Landwerlin struct intel_engine_cs *engine; 326609605548SLionel Landwerlin enum intel_engine_id id; 326709605548SLionel Landwerlin 326809605548SLionel Landwerlin for_each_engine(engine, dev_priv, id) { 326909605548SLionel Landwerlin I915_WRITE(RING_FAULT_REG(engine), 327009605548SLionel Landwerlin I915_READ(RING_FAULT_REG(engine)) & 327109605548SLionel Landwerlin ~RING_FAULT_VALID); 327209605548SLionel Landwerlin } 327309605548SLionel Landwerlin POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); 327409605548SLionel Landwerlin } 327535aed2e6SChris Wilson } 327635aed2e6SChris Wilson 327735aed2e6SChris Wilson /** 3278b8d24a06SMika Kuoppala * i915_handle_error - handle a gpu error 327914bb2c11STvrtko Ursulin * @dev_priv: i915 device private 328014b730fcSarun.siluvery@linux.intel.com * @engine_mask: mask representing engines that are hung 3281ce800754SChris Wilson * @flags: control flags 328287c390b6SMichel Thierry * @fmt: Error message format string 328387c390b6SMichel Thierry * 3284aafd8581SJavier Martinez Canillas * Do some basic checking of register state at error time and 328535aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 328635aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 328735aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 328835aed2e6SChris Wilson * of a ring dump etc.). 328935aed2e6SChris Wilson */ 3290c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv, 3291c033666aSChris Wilson u32 engine_mask, 3292ce800754SChris Wilson unsigned long flags, 329358174462SMika Kuoppala const char *fmt, ...) 329435aed2e6SChris Wilson { 3295142bc7d9SMichel Thierry struct intel_engine_cs *engine; 3296142bc7d9SMichel Thierry unsigned int tmp; 329758174462SMika Kuoppala char error_msg[80]; 3298ce800754SChris Wilson char *msg = NULL; 3299ce800754SChris Wilson 3300ce800754SChris Wilson if (fmt) { 3301ce800754SChris Wilson va_list args; 330235aed2e6SChris Wilson 330358174462SMika Kuoppala va_start(args, fmt); 330458174462SMika Kuoppala vscnprintf(error_msg, sizeof(error_msg), fmt, args); 330558174462SMika Kuoppala va_end(args); 330658174462SMika Kuoppala 3307ce800754SChris Wilson msg = error_msg; 3308ce800754SChris Wilson } 3309ce800754SChris Wilson 33101604a86dSChris Wilson /* 33111604a86dSChris Wilson * In most cases it's guaranteed that we get here with an RPM 33121604a86dSChris Wilson * reference held, for example because there is a pending GPU 33131604a86dSChris Wilson * request that won't finish until the reset is done. This 33141604a86dSChris Wilson * isn't the case at least when we get here by doing a 33151604a86dSChris Wilson * simulated reset via debugfs, so get an RPM reference. 
33161604a86dSChris Wilson */ 33171604a86dSChris Wilson intel_runtime_pm_get(dev_priv); 33181604a86dSChris Wilson 3319873d66fbSChris Wilson engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3320ce800754SChris Wilson 3321ce800754SChris Wilson if (flags & I915_ERROR_CAPTURE) { 3322ce800754SChris Wilson i915_capture_error_state(dev_priv, engine_mask, msg); 3323eaa14c24SChris Wilson i915_clear_error_registers(dev_priv); 3324ce800754SChris Wilson } 33258a905236SJesse Barnes 3326142bc7d9SMichel Thierry /* 3327142bc7d9SMichel Thierry * Try engine reset when available. We fall back to full reset if 3328142bc7d9SMichel Thierry * single reset fails. 3329142bc7d9SMichel Thierry */ 33302bfbf6feSChris Wilson if (intel_has_reset_engine(dev_priv) && 33312bfbf6feSChris Wilson !i915_terminally_wedged(&dev_priv->gpu_error)) { 3332142bc7d9SMichel Thierry for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 33339db529aaSDaniel Vetter BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3334142bc7d9SMichel Thierry if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3335142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3336142bc7d9SMichel Thierry continue; 3337142bc7d9SMichel Thierry 3338ce800754SChris Wilson if (i915_reset_engine(engine, msg) == 0) 3339142bc7d9SMichel Thierry engine_mask &= ~intel_engine_flag(engine); 3340142bc7d9SMichel Thierry 3341142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3342142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3343142bc7d9SMichel Thierry wake_up_bit(&dev_priv->gpu_error.flags, 3344142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id); 3345142bc7d9SMichel Thierry } 3346142bc7d9SMichel Thierry } 3347142bc7d9SMichel Thierry 33488af29b0cSChris Wilson if (!engine_mask) 33491604a86dSChris Wilson goto out; 33508af29b0cSChris Wilson 3351142bc7d9SMichel Thierry /* Full reset needs the mutex, stop any other user trying to do so. */ 3352d5367307SChris Wilson if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3353d5367307SChris Wilson wait_event(dev_priv->gpu_error.reset_queue, 3354d5367307SChris Wilson !test_bit(I915_RESET_BACKOFF, 3355d5367307SChris Wilson &dev_priv->gpu_error.flags)); 33561604a86dSChris Wilson goto out; 3357d5367307SChris Wilson } 3358ba1234d1SBen Gamari 3359142bc7d9SMichel Thierry /* Prevent any other reset-engine attempt. 
*/ 3360142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3361142bc7d9SMichel Thierry while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3362142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3363142bc7d9SMichel Thierry wait_on_bit(&dev_priv->gpu_error.flags, 3364142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id, 3365142bc7d9SMichel Thierry TASK_UNINTERRUPTIBLE); 3366142bc7d9SMichel Thierry } 3367142bc7d9SMichel Thierry 3368d0667e9cSChris Wilson i915_reset_device(dev_priv, engine_mask, msg); 3369d5367307SChris Wilson 3370142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3371142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3372142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3373142bc7d9SMichel Thierry } 3374142bc7d9SMichel Thierry 3375d5367307SChris Wilson clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3376d5367307SChris Wilson wake_up_all(&dev_priv->gpu_error.reset_queue); 33771604a86dSChris Wilson 33781604a86dSChris Wilson out: 33791604a86dSChris Wilson intel_runtime_pm_put(dev_priv); 33808a905236SJesse Barnes } 33818a905236SJesse Barnes 338242f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 338342f52ef8SKeith Packard * we use as a pipe index 338442f52ef8SKeith Packard */ 338586e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 33860a3e67a4SJesse Barnes { 3387fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3388e9d21d7fSKeith Packard unsigned long irqflags; 338971e0ffa5SJesse Barnes 33901ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 339186e83e35SChris Wilson i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 339286e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 339386e83e35SChris Wilson 339486e83e35SChris Wilson return 0; 339586e83e35SChris Wilson } 339686e83e35SChris Wilson 339786e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 339886e83e35SChris Wilson { 339986e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 340086e83e35SChris Wilson unsigned long irqflags; 340186e83e35SChris Wilson 340286e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34037c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 3404755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 34051ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 34068692d00eSChris Wilson 34070a3e67a4SJesse Barnes return 0; 34080a3e67a4SJesse Barnes } 34090a3e67a4SJesse Barnes 341088e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3411f796cf8fSJesse Barnes { 3412fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3413f796cf8fSJesse Barnes unsigned long irqflags; 341455b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 341586e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3416f796cf8fSJesse Barnes 3417f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3418fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, bit); 3419b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3420b1f14ad0SJesse Barnes 34212e8bf223SDhinakaran Pandiyan /* Even though there is no DMC, frame counter can get stuck when 34222e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated. 
34232e8bf223SDhinakaran Pandiyan */ 34242e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34252e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34262e8bf223SDhinakaran Pandiyan 3427b1f14ad0SJesse Barnes return 0; 3428b1f14ad0SJesse Barnes } 3429b1f14ad0SJesse Barnes 343088e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3431abd58f01SBen Widawsky { 3432fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3433abd58f01SBen Widawsky unsigned long irqflags; 3434abd58f01SBen Widawsky 3435abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3436013d3752SVille Syrjälä bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3437abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3438013d3752SVille Syrjälä 34392e8bf223SDhinakaran Pandiyan /* Even if there is no DMC, frame counter can get stuck when 34402e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated, so check only for PSR. 34412e8bf223SDhinakaran Pandiyan */ 34422e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34432e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34442e8bf223SDhinakaran Pandiyan 3445abd58f01SBen Widawsky return 0; 3446abd58f01SBen Widawsky } 3447abd58f01SBen Widawsky 344842f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 344942f52ef8SKeith Packard * we use as a pipe index 345042f52ef8SKeith Packard */ 345186e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 345286e83e35SChris Wilson { 345386e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 345486e83e35SChris Wilson unsigned long irqflags; 345586e83e35SChris Wilson 345686e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 345786e83e35SChris Wilson i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 345886e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 345986e83e35SChris Wilson } 346086e83e35SChris Wilson 346186e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 34620a3e67a4SJesse Barnes { 3463fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3464e9d21d7fSKeith Packard unsigned long irqflags; 34650a3e67a4SJesse Barnes 34661ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34677c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 3468755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 34691ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 34700a3e67a4SJesse Barnes } 34710a3e67a4SJesse Barnes 347288e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3473f796cf8fSJesse Barnes { 3474fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3475f796cf8fSJesse Barnes unsigned long irqflags; 347655b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
347786e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3478f796cf8fSJesse Barnes 3479f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3480fbdedaeaSVille Syrjälä ilk_disable_display_irq(dev_priv, bit); 3481b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3482b1f14ad0SJesse Barnes } 3483b1f14ad0SJesse Barnes 348488e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3485abd58f01SBen Widawsky { 3486fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3487abd58f01SBen Widawsky unsigned long irqflags; 3488abd58f01SBen Widawsky 3489abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3490013d3752SVille Syrjälä bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3491abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3492abd58f01SBen Widawsky } 3493abd58f01SBen Widawsky 3494b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv) 349591738a95SPaulo Zanoni { 34966e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 349791738a95SPaulo Zanoni return; 349891738a95SPaulo Zanoni 34993488d4ebSVille Syrjälä GEN3_IRQ_RESET(SDE); 3500105b122eSPaulo Zanoni 35016e266956STvrtko Ursulin if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3502105b122eSPaulo Zanoni I915_WRITE(SERR_INT, 0xffffffff); 3503622364b6SPaulo Zanoni } 3504105b122eSPaulo Zanoni 350591738a95SPaulo Zanoni /* 3506622364b6SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed PCH 3507622364b6SPaulo Zanoni * interrupts. Hence we can't update it after the interrupt handler is enabled - 3508622364b6SPaulo Zanoni * instead we unconditionally enable all PCH interrupt sources here, but then 3509622364b6SPaulo Zanoni * only unmask them as needed with SDEIMR. 3510622364b6SPaulo Zanoni * 3511622364b6SPaulo Zanoni * This function needs to be called before interrupts are enabled. 
351291738a95SPaulo Zanoni */ 3513622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev) 3514622364b6SPaulo Zanoni { 3515fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3516622364b6SPaulo Zanoni 35176e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 3518622364b6SPaulo Zanoni return; 3519622364b6SPaulo Zanoni 3520622364b6SPaulo Zanoni WARN_ON(I915_READ(SDEIER) != 0); 352191738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 352291738a95SPaulo Zanoni POSTING_READ(SDEIER); 352391738a95SPaulo Zanoni } 352491738a95SPaulo Zanoni 3525b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3526d18ea1b5SDaniel Vetter { 35273488d4ebSVille Syrjälä GEN3_IRQ_RESET(GT); 3528b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 35293488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN6_PM); 3530d18ea1b5SDaniel Vetter } 3531d18ea1b5SDaniel Vetter 353270591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 353370591a41SVille Syrjälä { 353471b8b41dSVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 353571b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 353671b8b41dSVille Syrjälä else 353771b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 353871b8b41dSVille Syrjälä 3539ad22d106SVille Syrjälä i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 354070591a41SVille Syrjälä I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 354170591a41SVille Syrjälä 354244d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 354370591a41SVille Syrjälä 35443488d4ebSVille Syrjälä GEN3_IRQ_RESET(VLV_); 35458bd099a7SChris Wilson dev_priv->irq_mask = ~0u; 354670591a41SVille Syrjälä } 354770591a41SVille Syrjälä 35488bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 35498bb61306SVille Syrjälä { 35508bb61306SVille Syrjälä u32 pipestat_mask; 35519ab981f2SVille Syrjälä u32 enable_mask; 35528bb61306SVille Syrjälä enum pipe pipe; 35538bb61306SVille Syrjälä 3554842ebf7aSVille Syrjälä pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 35558bb61306SVille Syrjälä 35568bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 35578bb61306SVille Syrjälä for_each_pipe(dev_priv, pipe) 35588bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 35598bb61306SVille Syrjälä 35609ab981f2SVille Syrjälä enable_mask = I915_DISPLAY_PORT_INTERRUPT | 35618bb61306SVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3562ebf5f921SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3563ebf5f921SVille Syrjälä I915_LPE_PIPE_A_INTERRUPT | 3564ebf5f921SVille Syrjälä I915_LPE_PIPE_B_INTERRUPT; 3565ebf5f921SVille Syrjälä 35668bb61306SVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 3567ebf5f921SVille Syrjälä enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3568ebf5f921SVille Syrjälä I915_LPE_PIPE_C_INTERRUPT; 35696b7eafc1SVille Syrjälä 35708bd099a7SChris Wilson WARN_ON(dev_priv->irq_mask != ~0u); 35716b7eafc1SVille Syrjälä 35729ab981f2SVille Syrjälä dev_priv->irq_mask = ~enable_mask; 35738bb61306SVille Syrjälä 35743488d4ebSVille Syrjälä GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 35758bb61306SVille Syrjälä } 35768bb61306SVille Syrjälä 35778bb61306SVille Syrjälä /* drm_dma.h hooks 35788bb61306SVille Syrjälä */ 35798bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev) 35808bb61306SVille Syrjälä { 3581fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 35828bb61306SVille 
Syrjälä 35833488d4ebSVille Syrjälä GEN3_IRQ_RESET(DE); 3584cf819effSLucas De Marchi if (IS_GEN(dev_priv, 7)) 35858bb61306SVille Syrjälä I915_WRITE(GEN7_ERR_INT, 0xffffffff); 35868bb61306SVille Syrjälä 3587fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 3588fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3589fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3590fc340442SDaniel Vetter } 3591fc340442SDaniel Vetter 3592b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 35938bb61306SVille Syrjälä 3594b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 35958bb61306SVille Syrjälä } 35968bb61306SVille Syrjälä 35976bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev) 35987e231dbeSJesse Barnes { 3599fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 36007e231dbeSJesse Barnes 360134c7b8a7SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 360234c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 360334c7b8a7SVille Syrjälä 3604b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 36057e231dbeSJesse Barnes 3606ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 36079918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 360870591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3609ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 36107e231dbeSJesse Barnes } 36117e231dbeSJesse Barnes 3612d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3613d6e3cca3SDaniel Vetter { 3614d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 0); 3615d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 1); 3616d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 2); 3617d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 3); 3618d6e3cca3SDaniel Vetter } 3619d6e3cca3SDaniel Vetter 3620823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev) 3621abd58f01SBen Widawsky { 3622fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3623abd58f01SBen Widawsky int pipe; 3624abd58f01SBen Widawsky 36254376b9c9SMika Kuoppala gen8_master_intr_disable(dev_priv->regs); 3626abd58f01SBen Widawsky 3627d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 3628abd58f01SBen Widawsky 3629e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3630e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3631e04f7eceSVille Syrjälä 3632055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 3633f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 3634813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 3635f86f3fb0SPaulo Zanoni GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3636abd58f01SBen Widawsky 36373488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_PORT_); 36383488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_MISC_); 36393488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 3640abd58f01SBen Widawsky 36416e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 3642b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 3643abd58f01SBen Widawsky } 3644abd58f01SBen Widawsky 364551951ae7SMika Kuoppala static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 364651951ae7SMika Kuoppala { 364751951ae7SMika Kuoppala /* Disable RCS, BCS, VCS and VECS class engines. */ 364851951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 364951951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 365051951ae7SMika Kuoppala 365151951ae7SMika Kuoppala /* Restore masks irqs on RCS, BCS, VCS and VECS engines. 
*/ 365251951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 365351951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 365451951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 365551951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 365651951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3657d02b98b8SOscar Mateo 3658d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3659d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 366051951ae7SMika Kuoppala } 366151951ae7SMika Kuoppala 366251951ae7SMika Kuoppala static void gen11_irq_reset(struct drm_device *dev) 366351951ae7SMika Kuoppala { 366451951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 366551951ae7SMika Kuoppala int pipe; 366651951ae7SMika Kuoppala 366781067b71SMika Kuoppala gen11_master_intr_disable(dev_priv->regs); 366851951ae7SMika Kuoppala 366951951ae7SMika Kuoppala gen11_gt_irq_reset(dev_priv); 367051951ae7SMika Kuoppala 367151951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 367251951ae7SMika Kuoppala 367362819dfdSJosé Roberto de Souza I915_WRITE(EDP_PSR_IMR, 0xffffffff); 367462819dfdSJosé Roberto de Souza I915_WRITE(EDP_PSR_IIR, 0xffffffff); 367562819dfdSJosé Roberto de Souza 367651951ae7SMika Kuoppala for_each_pipe(dev_priv, pipe) 367751951ae7SMika Kuoppala if (intel_display_power_is_enabled(dev_priv, 367851951ae7SMika Kuoppala POWER_DOMAIN_PIPE(pipe))) 367951951ae7SMika Kuoppala GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 368051951ae7SMika Kuoppala 368151951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_PORT_); 368251951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_MISC_); 3683121e758eSDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_DE_HPD_); 3684df0d28c1SDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_GU_MISC_); 368551951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_PCU_); 368631604222SAnusha Srivatsa 368731604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 368831604222SAnusha Srivatsa GEN3_IRQ_RESET(SDE); 368951951ae7SMika Kuoppala } 369051951ae7SMika Kuoppala 36914c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3692001bd2cbSImre Deak u8 pipe_mask) 3693d49bdb0eSPaulo Zanoni { 36941180e206SPaulo Zanoni uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 36956831f3e3SVille Syrjälä enum pipe pipe; 3696d49bdb0eSPaulo Zanoni 369713321786SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 36989dfe2e3aSImre Deak 36999dfe2e3aSImre Deak if (!intel_irqs_enabled(dev_priv)) { 37009dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 37019dfe2e3aSImre Deak return; 37029dfe2e3aSImre Deak } 37039dfe2e3aSImre Deak 37046831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 37056831f3e3SVille Syrjälä GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 37066831f3e3SVille Syrjälä dev_priv->de_irq_mask[pipe], 37076831f3e3SVille Syrjälä ~dev_priv->de_irq_mask[pipe] | extra_ier); 37089dfe2e3aSImre Deak 370913321786SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 3710d49bdb0eSPaulo Zanoni } 3711d49bdb0eSPaulo Zanoni 3712aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3713001bd2cbSImre Deak u8 pipe_mask) 3714aae8ba84SVille Syrjälä { 37156831f3e3SVille Syrjälä enum pipe pipe; 37166831f3e3SVille Syrjälä 3717aae8ba84SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37189dfe2e3aSImre Deak 37199dfe2e3aSImre Deak if (!intel_irqs_enabled(dev_priv)) { 37209dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 37219dfe2e3aSImre Deak return; 37229dfe2e3aSImre 
Deak } 37239dfe2e3aSImre Deak 37246831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 37256831f3e3SVille Syrjälä GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 37269dfe2e3aSImre Deak 3727aae8ba84SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 3728aae8ba84SVille Syrjälä 3729aae8ba84SVille Syrjälä /* make sure we're done processing display irqs */ 373091c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 3731aae8ba84SVille Syrjälä } 3732aae8ba84SVille Syrjälä 37336bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev) 373443f328d7SVille Syrjälä { 3735fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 373643f328d7SVille Syrjälä 373743f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 373843f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 373943f328d7SVille Syrjälä 3740d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 374143f328d7SVille Syrjälä 37423488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 374343f328d7SVille Syrjälä 3744ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37459918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 374670591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3747ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 374843f328d7SVille Syrjälä } 374943f328d7SVille Syrjälä 375091d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 375187a02106SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 375287a02106SVille Syrjälä { 375387a02106SVille Syrjälä struct intel_encoder *encoder; 375487a02106SVille Syrjälä u32 enabled_irqs = 0; 375587a02106SVille Syrjälä 375691c8a326SChris Wilson for_each_intel_encoder(&dev_priv->drm, encoder) 375787a02106SVille Syrjälä if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 375887a02106SVille Syrjälä enabled_irqs |= hpd[encoder->hpd_pin]; 375987a02106SVille Syrjälä 376087a02106SVille Syrjälä return enabled_irqs; 376187a02106SVille Syrjälä } 376287a02106SVille Syrjälä 37631a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 37641a56b1a2SImre Deak { 37651a56b1a2SImre Deak u32 hotplug; 37661a56b1a2SImre Deak 37671a56b1a2SImre Deak /* 37681a56b1a2SImre Deak * Enable digital hotplug on the PCH, and configure the DP short pulse 37691a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec). 37701a56b1a2SImre Deak * The pulse duration bits are reserved on LPT+. 37711a56b1a2SImre Deak */ 37721a56b1a2SImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 37731a56b1a2SImre Deak hotplug &= ~(PORTB_PULSE_DURATION_MASK | 37741a56b1a2SImre Deak PORTC_PULSE_DURATION_MASK | 37751a56b1a2SImre Deak PORTD_PULSE_DURATION_MASK); 37761a56b1a2SImre Deak hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 37771a56b1a2SImre Deak hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 37781a56b1a2SImre Deak hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 37791a56b1a2SImre Deak /* 37801a56b1a2SImre Deak * When CPU and PCH are on the same package, port A 37811a56b1a2SImre Deak * HPD must be enabled in both north and south. 
37821a56b1a2SImre Deak */ 37831a56b1a2SImre Deak if (HAS_PCH_LPT_LP(dev_priv)) 37841a56b1a2SImre Deak hotplug |= PORTA_HOTPLUG_ENABLE; 37851a56b1a2SImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 37861a56b1a2SImre Deak } 37871a56b1a2SImre Deak 378891d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 378982a28bcfSDaniel Vetter { 37901a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 379182a28bcfSDaniel Vetter 379291d14251STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) { 3793fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 379491d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 379582a28bcfSDaniel Vetter } else { 3796fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 379791d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 379882a28bcfSDaniel Vetter } 379982a28bcfSDaniel Vetter 3800fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 380182a28bcfSDaniel Vetter 38021a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 38036dbf30ceSVille Syrjälä } 380426951cafSXiong Zhang 380531604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 380631604222SAnusha Srivatsa { 380731604222SAnusha Srivatsa u32 hotplug; 380831604222SAnusha Srivatsa 380931604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_DDI); 381031604222SAnusha Srivatsa hotplug |= ICP_DDIA_HPD_ENABLE | 381131604222SAnusha Srivatsa ICP_DDIB_HPD_ENABLE; 381231604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 381331604222SAnusha Srivatsa 381431604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_TC); 381531604222SAnusha Srivatsa hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 381631604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC2) | 381731604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC3) | 381831604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC4); 381931604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 382031604222SAnusha Srivatsa } 382131604222SAnusha Srivatsa 382231604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 382331604222SAnusha Srivatsa { 382431604222SAnusha Srivatsa u32 hotplug_irqs, enabled_irqs; 382531604222SAnusha Srivatsa 382631604222SAnusha Srivatsa hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 382731604222SAnusha Srivatsa enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 382831604222SAnusha Srivatsa 382931604222SAnusha Srivatsa ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 383031604222SAnusha Srivatsa 383131604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 383231604222SAnusha Srivatsa } 383331604222SAnusha Srivatsa 3834121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3835121e758eSDhinakaran Pandiyan { 3836121e758eSDhinakaran Pandiyan u32 hotplug; 3837121e758eSDhinakaran Pandiyan 3838121e758eSDhinakaran Pandiyan hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3839121e758eSDhinakaran Pandiyan hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3840121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3841121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3842121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3843121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3844b796b971SDhinakaran Pandiyan 3845b796b971SDhinakaran Pandiyan hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3846b796b971SDhinakaran Pandiyan hotplug |= 
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3847b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3848b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3849b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3850b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3851121e758eSDhinakaran Pandiyan } 3852121e758eSDhinakaran Pandiyan 3853121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3854121e758eSDhinakaran Pandiyan { 3855121e758eSDhinakaran Pandiyan u32 hotplug_irqs, enabled_irqs; 3856121e758eSDhinakaran Pandiyan u32 val; 3857121e758eSDhinakaran Pandiyan 3858b796b971SDhinakaran Pandiyan enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3859b796b971SDhinakaran Pandiyan hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3860121e758eSDhinakaran Pandiyan 3861121e758eSDhinakaran Pandiyan val = I915_READ(GEN11_DE_HPD_IMR); 3862121e758eSDhinakaran Pandiyan val &= ~hotplug_irqs; 3863121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IMR, val); 3864121e758eSDhinakaran Pandiyan POSTING_READ(GEN11_DE_HPD_IMR); 3865121e758eSDhinakaran Pandiyan 3866121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 386731604222SAnusha Srivatsa 386831604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 386931604222SAnusha Srivatsa icp_hpd_irq_setup(dev_priv); 3870121e758eSDhinakaran Pandiyan } 3871121e758eSDhinakaran Pandiyan 38722a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 38732a57d9ccSImre Deak { 38743b92e263SRodrigo Vivi u32 val, hotplug; 38753b92e263SRodrigo Vivi 38763b92e263SRodrigo Vivi /* Display WA #1179 WaHardHangonHotPlug: cnp */ 38773b92e263SRodrigo Vivi if (HAS_PCH_CNP(dev_priv)) { 38783b92e263SRodrigo Vivi val = I915_READ(SOUTH_CHICKEN1); 38793b92e263SRodrigo Vivi val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 38803b92e263SRodrigo Vivi val |= CHASSIS_CLK_REQ_DURATION(0xf); 38813b92e263SRodrigo Vivi I915_WRITE(SOUTH_CHICKEN1, val); 38823b92e263SRodrigo Vivi } 38832a57d9ccSImre Deak 38842a57d9ccSImre Deak /* Enable digital hotplug on the PCH */ 38852a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 38862a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 38872a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 38882a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE | 38892a57d9ccSImre Deak PORTD_HOTPLUG_ENABLE; 38902a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 38912a57d9ccSImre Deak 38922a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG2); 38932a57d9ccSImre Deak hotplug |= PORTE_HOTPLUG_ENABLE; 38942a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 38952a57d9ccSImre Deak } 38962a57d9ccSImre Deak 389791d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 38986dbf30ceSVille Syrjälä { 38992a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 39006dbf30ceSVille Syrjälä 39016dbf30ceSVille Syrjälä hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 390291d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 39036dbf30ceSVille Syrjälä 39046dbf30ceSVille Syrjälä ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 39056dbf30ceSVille Syrjälä 39062a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 390726951cafSXiong Zhang } 39087fe0b973SKeith Packard 39091a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 39101a56b1a2SImre Deak { 39111a56b1a2SImre Deak u32 hotplug; 39121a56b1a2SImre Deak 39131a56b1a2SImre Deak /* 39141a56b1a2SImre 
Deak * Enable digital hotplug on the CPU, and configure the DP short pulse 39151a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec) 39161a56b1a2SImre Deak * The pulse duration bits are reserved on HSW+. 39171a56b1a2SImre Deak */ 39181a56b1a2SImre Deak hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 39191a56b1a2SImre Deak hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 39201a56b1a2SImre Deak hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 39211a56b1a2SImre Deak DIGITAL_PORTA_PULSE_DURATION_2ms; 39221a56b1a2SImre Deak I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 39231a56b1a2SImre Deak } 39241a56b1a2SImre Deak 392591d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3926e4ce95aaSVille Syrjälä { 39271a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 3928e4ce95aaSVille Syrjälä 392991d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 8) { 39303a3b3c7dSVille Syrjälä hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 393191d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 39323a3b3c7dSVille Syrjälä 39333a3b3c7dSVille Syrjälä bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 393491d14251STvrtko Ursulin } else if (INTEL_GEN(dev_priv) >= 7) { 393523bb4cb5SVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 393691d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 39373a3b3c7dSVille Syrjälä 39383a3b3c7dSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 393923bb4cb5SVille Syrjälä } else { 3940e4ce95aaSVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG; 394191d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3942e4ce95aaSVille Syrjälä 3943e4ce95aaSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 39443a3b3c7dSVille Syrjälä } 3945e4ce95aaSVille Syrjälä 39461a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 3947e4ce95aaSVille Syrjälä 394891d14251STvrtko Ursulin ibx_hpd_irq_setup(dev_priv); 3949e4ce95aaSVille Syrjälä } 3950e4ce95aaSVille Syrjälä 39512a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 39522a57d9ccSImre Deak u32 enabled_irqs) 3953e0a20ad7SShashank Sharma { 39542a57d9ccSImre Deak u32 hotplug; 3955e0a20ad7SShashank Sharma 3956a52bb15bSVille Syrjälä hotplug = I915_READ(PCH_PORT_HOTPLUG); 39572a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 39582a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 39592a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE; 3960d252bf68SShubhangi Shrivastava 3961d252bf68SShubhangi Shrivastava DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3962d252bf68SShubhangi Shrivastava hotplug, enabled_irqs); 3963d252bf68SShubhangi Shrivastava hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3964d252bf68SShubhangi Shrivastava 3965d252bf68SShubhangi Shrivastava /* 3966d252bf68SShubhangi Shrivastava * For BXT invert bit has to be set based on AOB design 3967d252bf68SShubhangi Shrivastava * for HPD detection logic, update it based on VBT fields. 
3968d252bf68SShubhangi Shrivastava */ 3969d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3970d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3971d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIA_HPD_INVERT; 3972d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3973d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3974d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIB_HPD_INVERT; 3975d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3976d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3977d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIC_HPD_INVERT; 3978d252bf68SShubhangi Shrivastava 3979a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3980e0a20ad7SShashank Sharma } 3981e0a20ad7SShashank Sharma 39822a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 39832a57d9ccSImre Deak { 39842a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 39852a57d9ccSImre Deak } 39862a57d9ccSImre Deak 39872a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 39882a57d9ccSImre Deak { 39892a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 39902a57d9ccSImre Deak 39912a57d9ccSImre Deak enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 39922a57d9ccSImre Deak hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 39932a57d9ccSImre Deak 39942a57d9ccSImre Deak bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 39952a57d9ccSImre Deak 39962a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 39972a57d9ccSImre Deak } 39982a57d9ccSImre Deak 3999d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 4000d46da437SPaulo Zanoni { 4001fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 400282a28bcfSDaniel Vetter u32 mask; 4003d46da437SPaulo Zanoni 40046e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 4005692a04cfSDaniel Vetter return; 4006692a04cfSDaniel Vetter 40076e266956STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) 40085c673b60SDaniel Vetter mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 40094ebc6509SDhinakaran Pandiyan else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 40105c673b60SDaniel Vetter mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 40114ebc6509SDhinakaran Pandiyan else 40124ebc6509SDhinakaran Pandiyan mask = SDE_GMBUS_CPT; 40138664281bSPaulo Zanoni 40143488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, SDEIIR); 4015d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 40162a57d9ccSImre Deak 40172a57d9ccSImre Deak if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 40182a57d9ccSImre Deak HAS_PCH_LPT(dev_priv)) 40191a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 40202a57d9ccSImre Deak else 40212a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 4022d46da437SPaulo Zanoni } 4023d46da437SPaulo Zanoni 40240a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 40250a9a8c91SDaniel Vetter { 4026fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40270a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 40280a9a8c91SDaniel Vetter 40290a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 40300a9a8c91SDaniel Vetter 40310a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 40323c9192bcSTvrtko Ursulin if (HAS_L3_DPF(dev_priv)) { 40330a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. 
*/ 4034772c2a51STvrtko Ursulin dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4035772c2a51STvrtko Ursulin gt_irqs |= GT_PARITY_ERROR(dev_priv); 40360a9a8c91SDaniel Vetter } 40370a9a8c91SDaniel Vetter 40380a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 4039cf819effSLucas De Marchi if (IS_GEN(dev_priv, 5)) { 4040f8973c21SChris Wilson gt_irqs |= ILK_BSD_USER_INTERRUPT; 40410a9a8c91SDaniel Vetter } else { 40420a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 40430a9a8c91SDaniel Vetter } 40440a9a8c91SDaniel Vetter 40453488d4ebSVille Syrjälä GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 40460a9a8c91SDaniel Vetter 4047b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 404878e68d36SImre Deak /* 404978e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS 405078e68d36SImre Deak * itself is enabled/disabled. 405178e68d36SImre Deak */ 4052f4e9af4fSAkash Goel if (HAS_VEBOX(dev_priv)) { 40530a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4054f4e9af4fSAkash Goel dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4055f4e9af4fSAkash Goel } 40560a9a8c91SDaniel Vetter 4057f4e9af4fSAkash Goel dev_priv->pm_imr = 0xffffffff; 40583488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 40590a9a8c91SDaniel Vetter } 40600a9a8c91SDaniel Vetter } 40610a9a8c91SDaniel Vetter 4062f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 4063036a4a7dSZhenyu Wang { 4064fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40658e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 40668e76f8dcSPaulo Zanoni 4067b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) { 40688e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4069842ebf7aSVille Syrjälä DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 40708e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 407123bb4cb5SVille Syrjälä DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 407223bb4cb5SVille Syrjälä DE_DP_A_HOTPLUG_IVB); 40738e76f8dcSPaulo Zanoni } else { 40748e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4075842ebf7aSVille Syrjälä DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4076842ebf7aSVille Syrjälä DE_PIPEA_CRC_DONE | DE_POISON); 4077e4ce95aaSVille Syrjälä extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4078e4ce95aaSVille Syrjälä DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4079e4ce95aaSVille Syrjälä DE_DP_A_HOTPLUG); 40808e76f8dcSPaulo Zanoni } 4081036a4a7dSZhenyu Wang 4082fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 4083fc340442SDaniel Vetter gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 40841aeb1b5fSDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4085fc340442SDaniel Vetter display_mask |= DE_EDP_PSR_INT_HSW; 4086fc340442SDaniel Vetter } 4087fc340442SDaniel Vetter 40881ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 4089036a4a7dSZhenyu Wang 4090622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4091622364b6SPaulo Zanoni 40923488d4ebSVille Syrjälä GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4093036a4a7dSZhenyu Wang 40940a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 4095036a4a7dSZhenyu Wang 40961a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 40971a56b1a2SImre Deak 4098d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 40997fe0b973SKeith Packard 410050a0bc90STvrtko Ursulin if (IS_IRONLAKE_M(dev_priv)) { 41016005ce42SDaniel Vetter /* Enable PCU event interrupts 
41026005ce42SDaniel Vetter * 41036005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 41044bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 41054bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 4106d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4107fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4108d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4109f97108d1SJesse Barnes } 4110f97108d1SJesse Barnes 4111036a4a7dSZhenyu Wang return 0; 4112036a4a7dSZhenyu Wang } 4113036a4a7dSZhenyu Wang 4114f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4115f8b79e58SImre Deak { 411667520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4117f8b79e58SImre Deak 4118f8b79e58SImre Deak if (dev_priv->display_irqs_enabled) 4119f8b79e58SImre Deak return; 4120f8b79e58SImre Deak 4121f8b79e58SImre Deak dev_priv->display_irqs_enabled = true; 4122f8b79e58SImre Deak 4123d6c69803SVille Syrjälä if (intel_irqs_enabled(dev_priv)) { 4124d6c69803SVille Syrjälä vlv_display_irq_reset(dev_priv); 4125ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4126f8b79e58SImre Deak } 4127d6c69803SVille Syrjälä } 4128f8b79e58SImre Deak 4129f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4130f8b79e58SImre Deak { 413167520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4132f8b79e58SImre Deak 4133f8b79e58SImre Deak if (!dev_priv->display_irqs_enabled) 4134f8b79e58SImre Deak return; 4135f8b79e58SImre Deak 4136f8b79e58SImre Deak dev_priv->display_irqs_enabled = false; 4137f8b79e58SImre Deak 4138950eabafSImre Deak if (intel_irqs_enabled(dev_priv)) 4139ad22d106SVille Syrjälä vlv_display_irq_reset(dev_priv); 4140f8b79e58SImre Deak } 4141f8b79e58SImre Deak 41420e6c9a9eSVille Syrjälä 41430e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev) 41440e6c9a9eSVille Syrjälä { 4145fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 41460e6c9a9eSVille Syrjälä 41470a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 41487e231dbeSJesse Barnes 4149ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 41509918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4151ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4152ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4153ad22d106SVille Syrjälä 41547e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 415534c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 415620afbda2SDaniel Vetter 415720afbda2SDaniel Vetter return 0; 415820afbda2SDaniel Vetter } 415920afbda2SDaniel Vetter 4160abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4161abd58f01SBen Widawsky { 4162abd58f01SBen Widawsky /* These are interrupts we'll toggle with the ring mask register */ 4163abd58f01SBen Widawsky uint32_t gt_interrupts[] = { 4164abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 416573d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 416673d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 416773d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 4168abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 416973d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 417073d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << 
GEN8_VCS2_IRQ_SHIFT | 417173d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 4172abd58f01SBen Widawsky 0, 417373d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 417473d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 4175abd58f01SBen Widawsky }; 4176abd58f01SBen Widawsky 4177f4e9af4fSAkash Goel dev_priv->pm_ier = 0x0; 4178f4e9af4fSAkash Goel dev_priv->pm_imr = ~dev_priv->pm_ier; 41799a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 41809a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 418178e68d36SImre Deak /* 418278e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS itself 418326705e20SSagar Arun Kamble * is enabled/disabled. Same wil be the case for GuC interrupts. 418478e68d36SImre Deak */ 4185f4e9af4fSAkash Goel GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 41869a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4187abd58f01SBen Widawsky } 4188abd58f01SBen Widawsky 4189abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4190abd58f01SBen Widawsky { 4191770de83dSDamien Lespiau uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4192770de83dSDamien Lespiau uint32_t de_pipe_enables; 41933a3b3c7dSVille Syrjälä u32 de_port_masked = GEN8_AUX_CHANNEL_A; 41943a3b3c7dSVille Syrjälä u32 de_port_enables; 4195df0d28c1SDhinakaran Pandiyan u32 de_misc_masked = GEN8_DE_EDP_PSR; 41963a3b3c7dSVille Syrjälä enum pipe pipe; 4197770de83dSDamien Lespiau 4198df0d28c1SDhinakaran Pandiyan if (INTEL_GEN(dev_priv) <= 10) 4199df0d28c1SDhinakaran Pandiyan de_misc_masked |= GEN8_DE_MISC_GSE; 4200df0d28c1SDhinakaran Pandiyan 4201bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) { 4202842ebf7aSVille Syrjälä de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 42033a3b3c7dSVille Syrjälä de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 420488e04703SJesse Barnes GEN9_AUX_CHANNEL_D; 4205cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 42063a3b3c7dSVille Syrjälä de_port_masked |= BXT_DE_PORT_GMBUS; 42073a3b3c7dSVille Syrjälä } else { 4208842ebf7aSVille Syrjälä de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 42093a3b3c7dSVille Syrjälä } 4210770de83dSDamien Lespiau 4211bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 4212bb187e93SJames Ausmus de_port_masked |= ICL_AUX_CHANNEL_E; 4213bb187e93SJames Ausmus 42149bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4215a324fcacSRodrigo Vivi de_port_masked |= CNL_AUX_CHANNEL_F; 4216a324fcacSRodrigo Vivi 4217770de83dSDamien Lespiau de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4218770de83dSDamien Lespiau GEN8_PIPE_FIFO_UNDERRUN; 4219770de83dSDamien Lespiau 42203a3b3c7dSVille Syrjälä de_port_enables = de_port_masked; 4221cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4222a52bb15bSVille Syrjälä de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4223a52bb15bSVille Syrjälä else if (IS_BROADWELL(dev_priv)) 42243a3b3c7dSVille Syrjälä de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 42253a3b3c7dSVille Syrjälä 4226e04f7eceSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 422754fd3149SDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4228e04f7eceSVille Syrjälä 42290a195c02SMika Kahola for_each_pipe(dev_priv, pipe) { 42300a195c02SMika Kahola dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4231abd58f01SBen Widawsky 4232f458ebbcSDaniel 
Vetter if (intel_display_power_is_enabled(dev_priv, 4233813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 4234813bde43SPaulo Zanoni GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 4235813bde43SPaulo Zanoni dev_priv->de_irq_mask[pipe], 423635079899SPaulo Zanoni de_pipe_enables); 42370a195c02SMika Kahola } 4238abd58f01SBen Widawsky 42393488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 42403488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 42412a57d9ccSImre Deak 4242121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11) { 4243121e758eSDhinakaran Pandiyan u32 de_hpd_masked = 0; 4244b796b971SDhinakaran Pandiyan u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4245b796b971SDhinakaran Pandiyan GEN11_DE_TBT_HOTPLUG_MASK; 4246121e758eSDhinakaran Pandiyan 4247121e758eSDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); 4248121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 4249121e758eSDhinakaran Pandiyan } else if (IS_GEN9_LP(dev_priv)) { 42502a57d9ccSImre Deak bxt_hpd_detection_setup(dev_priv); 4251121e758eSDhinakaran Pandiyan } else if (IS_BROADWELL(dev_priv)) { 42521a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 4253abd58f01SBen Widawsky } 4254121e758eSDhinakaran Pandiyan } 4255abd58f01SBen Widawsky 4256abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev) 4257abd58f01SBen Widawsky { 4258fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4259abd58f01SBen Widawsky 42606e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4261622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4262622364b6SPaulo Zanoni 4263abd58f01SBen Widawsky gen8_gt_irq_postinstall(dev_priv); 4264abd58f01SBen Widawsky gen8_de_irq_postinstall(dev_priv); 4265abd58f01SBen Widawsky 42666e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4267abd58f01SBen Widawsky ibx_irq_postinstall(dev); 4268abd58f01SBen Widawsky 42694376b9c9SMika Kuoppala gen8_master_intr_enable(dev_priv->regs); 4270abd58f01SBen Widawsky 4271abd58f01SBen Widawsky return 0; 4272abd58f01SBen Widawsky } 4273abd58f01SBen Widawsky 427451951ae7SMika Kuoppala static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 427551951ae7SMika Kuoppala { 427651951ae7SMika Kuoppala const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 427751951ae7SMika Kuoppala 427851951ae7SMika Kuoppala BUILD_BUG_ON(irqs & 0xffff0000); 427951951ae7SMika Kuoppala 428051951ae7SMika Kuoppala /* Enable RCS, BCS, VCS and VECS class interrupts. */ 428151951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 428251951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 428351951ae7SMika Kuoppala 428451951ae7SMika Kuoppala /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 428551951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 428651951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 428751951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 428851951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 428951951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 429051951ae7SMika Kuoppala 4291d02b98b8SOscar Mateo /* 4292d02b98b8SOscar Mateo * RPS interrupts will get enabled/disabled on demand when RPS itself 4293d02b98b8SOscar Mateo * is enabled/disabled. 
4294d02b98b8SOscar Mateo */ 4295d02b98b8SOscar Mateo dev_priv->pm_ier = 0x0; 4296d02b98b8SOscar Mateo dev_priv->pm_imr = ~dev_priv->pm_ier; 4297d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4298d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 429951951ae7SMika Kuoppala } 430051951ae7SMika Kuoppala 430131604222SAnusha Srivatsa static void icp_irq_postinstall(struct drm_device *dev) 430231604222SAnusha Srivatsa { 430331604222SAnusha Srivatsa struct drm_i915_private *dev_priv = to_i915(dev); 430431604222SAnusha Srivatsa u32 mask = SDE_GMBUS_ICP; 430531604222SAnusha Srivatsa 430631604222SAnusha Srivatsa WARN_ON(I915_READ(SDEIER) != 0); 430731604222SAnusha Srivatsa I915_WRITE(SDEIER, 0xffffffff); 430831604222SAnusha Srivatsa POSTING_READ(SDEIER); 430931604222SAnusha Srivatsa 431031604222SAnusha Srivatsa gen3_assert_iir_is_zero(dev_priv, SDEIIR); 431131604222SAnusha Srivatsa I915_WRITE(SDEIMR, ~mask); 431231604222SAnusha Srivatsa 431331604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 431431604222SAnusha Srivatsa } 431531604222SAnusha Srivatsa 431651951ae7SMika Kuoppala static int gen11_irq_postinstall(struct drm_device *dev) 431751951ae7SMika Kuoppala { 431851951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 4319df0d28c1SDhinakaran Pandiyan u32 gu_misc_masked = GEN11_GU_MISC_GSE; 432051951ae7SMika Kuoppala 432131604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 432231604222SAnusha Srivatsa icp_irq_postinstall(dev); 432331604222SAnusha Srivatsa 432451951ae7SMika Kuoppala gen11_gt_irq_postinstall(dev_priv); 432551951ae7SMika Kuoppala gen8_de_irq_postinstall(dev_priv); 432651951ae7SMika Kuoppala 4327df0d28c1SDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4328df0d28c1SDhinakaran Pandiyan 432951951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 433051951ae7SMika Kuoppala 433181067b71SMika Kuoppala gen11_master_intr_enable(dev_priv->regs); 433251951ae7SMika Kuoppala 433351951ae7SMika Kuoppala return 0; 433451951ae7SMika Kuoppala } 433551951ae7SMika Kuoppala 433643f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev) 433743f328d7SVille Syrjälä { 4338fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 433943f328d7SVille Syrjälä 434043f328d7SVille Syrjälä gen8_gt_irq_postinstall(dev_priv); 434143f328d7SVille Syrjälä 4342ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 43439918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4344ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4345ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4346ad22d106SVille Syrjälä 4347e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 434843f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 434943f328d7SVille Syrjälä 435043f328d7SVille Syrjälä return 0; 435143f328d7SVille Syrjälä } 435243f328d7SVille Syrjälä 43536bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev) 4354c2798b19SChris Wilson { 4355fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4356c2798b19SChris Wilson 435744d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 435844d9241eSVille Syrjälä 4359e9e9848aSVille Syrjälä GEN2_IRQ_RESET(); 4360c2798b19SChris Wilson } 4361c2798b19SChris Wilson 4362c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 4363c2798b19SChris Wilson { 4364fac5e23eSChris Wilson struct drm_i915_private *dev_priv = 
to_i915(dev); 4365e9e9848aSVille Syrjälä u16 enable_mask; 4366c2798b19SChris Wilson 4367045cebd2SVille Syrjälä I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4368045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 4369c2798b19SChris Wilson 4370c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 4371c2798b19SChris Wilson dev_priv->irq_mask = 4372c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 437316659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 437416659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4375c2798b19SChris Wilson 4376e9e9848aSVille Syrjälä enable_mask = 4377c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4378c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 437916659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4380e9e9848aSVille Syrjälä I915_USER_INTERRUPT; 4381e9e9848aSVille Syrjälä 4382e9e9848aSVille Syrjälä GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4383c2798b19SChris Wilson 4384379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4385379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 4386d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4387755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4388755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4389d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4390379ef82dSDaniel Vetter 4391c2798b19SChris Wilson return 0; 4392c2798b19SChris Wilson } 4393c2798b19SChris Wilson 439478c357ddSVille Syrjälä static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 439578c357ddSVille Syrjälä u16 *eir, u16 *eir_stuck) 439678c357ddSVille Syrjälä { 439778c357ddSVille Syrjälä u16 emr; 439878c357ddSVille Syrjälä 439978c357ddSVille Syrjälä *eir = I915_READ16(EIR); 440078c357ddSVille Syrjälä 440178c357ddSVille Syrjälä if (*eir) 440278c357ddSVille Syrjälä I915_WRITE16(EIR, *eir); 440378c357ddSVille Syrjälä 440478c357ddSVille Syrjälä *eir_stuck = I915_READ16(EIR); 440578c357ddSVille Syrjälä if (*eir_stuck == 0) 440678c357ddSVille Syrjälä return; 440778c357ddSVille Syrjälä 440878c357ddSVille Syrjälä /* 440978c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 441078c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 441178c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 441278c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 441378c357ddSVille Syrjälä * is still pending. Also some EIR bits can't be 441478c357ddSVille Syrjälä * cleared except by handling the underlying error 441578c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 441678c357ddSVille Syrjälä * remains set. 
441778c357ddSVille Syrjälä */ 441878c357ddSVille Syrjälä emr = I915_READ16(EMR); 441978c357ddSVille Syrjälä I915_WRITE16(EMR, 0xffff); 442078c357ddSVille Syrjälä I915_WRITE16(EMR, emr | *eir_stuck); 442178c357ddSVille Syrjälä } 442278c357ddSVille Syrjälä 442378c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 442478c357ddSVille Syrjälä u16 eir, u16 eir_stuck) 442578c357ddSVille Syrjälä { 442678c357ddSVille Syrjälä DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 442778c357ddSVille Syrjälä 442878c357ddSVille Syrjälä if (eir_stuck) 442978c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 443078c357ddSVille Syrjälä } 443178c357ddSVille Syrjälä 443278c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 443378c357ddSVille Syrjälä u32 *eir, u32 *eir_stuck) 443478c357ddSVille Syrjälä { 443578c357ddSVille Syrjälä u32 emr; 443678c357ddSVille Syrjälä 443778c357ddSVille Syrjälä *eir = I915_READ(EIR); 443878c357ddSVille Syrjälä 443978c357ddSVille Syrjälä I915_WRITE(EIR, *eir); 444078c357ddSVille Syrjälä 444178c357ddSVille Syrjälä *eir_stuck = I915_READ(EIR); 444278c357ddSVille Syrjälä if (*eir_stuck == 0) 444378c357ddSVille Syrjälä return; 444478c357ddSVille Syrjälä 444578c357ddSVille Syrjälä /* 444678c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 444778c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 444878c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 444978c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 445078c357ddSVille Syrjälä * is still pending. Also some EIR bits can't be 445178c357ddSVille Syrjälä * cleared except by handling the underlying error 445278c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 445378c357ddSVille Syrjälä * remains set. 
445478c357ddSVille Syrjälä */ 445578c357ddSVille Syrjälä emr = I915_READ(EMR); 445678c357ddSVille Syrjälä I915_WRITE(EMR, 0xffffffff); 445778c357ddSVille Syrjälä I915_WRITE(EMR, emr | *eir_stuck); 445878c357ddSVille Syrjälä } 445978c357ddSVille Syrjälä 446078c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 446178c357ddSVille Syrjälä u32 eir, u32 eir_stuck) 446278c357ddSVille Syrjälä { 446378c357ddSVille Syrjälä DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 446478c357ddSVille Syrjälä 446578c357ddSVille Syrjälä if (eir_stuck) 446678c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 446778c357ddSVille Syrjälä } 446878c357ddSVille Syrjälä 4469ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4470c2798b19SChris Wilson { 447145a83f84SDaniel Vetter struct drm_device *dev = arg; 4472fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4473af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4474c2798b19SChris Wilson 44752dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 44762dd2a883SImre Deak return IRQ_NONE; 44772dd2a883SImre Deak 44781f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 44791f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 44801f814dacSImre Deak 4481af722d28SVille Syrjälä do { 4482af722d28SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 448378c357ddSVille Syrjälä u16 eir = 0, eir_stuck = 0; 4484af722d28SVille Syrjälä u16 iir; 4485af722d28SVille Syrjälä 4486c2798b19SChris Wilson iir = I915_READ16(IIR); 4487c2798b19SChris Wilson if (iir == 0) 4488af722d28SVille Syrjälä break; 4489c2798b19SChris Wilson 4490af722d28SVille Syrjälä ret = IRQ_HANDLED; 4491c2798b19SChris Wilson 4492eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4493eb64343cSVille Syrjälä * signalled in iir */ 4494eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4495c2798b19SChris Wilson 449678c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 449778c357ddSVille Syrjälä i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 449878c357ddSVille Syrjälä 4499fd3a4024SDaniel Vetter I915_WRITE16(IIR, iir); 4500c2798b19SChris Wilson 4501c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 45023b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4503c2798b19SChris Wilson 450478c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 450578c357ddSVille Syrjälä i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4506af722d28SVille Syrjälä 4507eb64343cSVille Syrjälä i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4508af722d28SVille Syrjälä } while (0); 4509c2798b19SChris Wilson 45101f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 45111f814dacSImre Deak 45121f814dacSImre Deak return ret; 4513c2798b19SChris Wilson } 4514c2798b19SChris Wilson 45156bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev) 4516a266c7d5SChris Wilson { 4517fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4518a266c7d5SChris Wilson 451956b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 45200706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4521a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4522a266c7d5SChris Wilson } 4523a266c7d5SChris Wilson 452444d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 452544d9241eSVille Syrjälä 4526ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4527a266c7d5SChris Wilson } 
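/*
 * Editorial sketch: the stuck-EIR handling that the comments in
 * i8xx_error_irq_ack()/i9xx_error_irq_ack() above describe, written out as
 * a standalone helper so the register sequence is easier to follow. The
 * reg_read/reg_write callbacks and all "sketch_"/"SKETCH_" names are
 * hypothetical scaffolding for this example only; the driver itself uses
 * I915_READ()/I915_WRITE() on the real EIR/EMR registers as shown above.
 */
enum sketch_reg { SKETCH_EIR, SKETCH_EMR };

static u32 sketch_ack_and_mask_stuck_errors(u32 (*reg_read)(enum sketch_reg),
					    void (*reg_write)(enum sketch_reg, u32))
{
	u32 eir, eir_stuck, emr;

	eir = reg_read(SKETCH_EIR);		/* latch the pending error bits */
	reg_write(SKETCH_EIR, eir);		/* write-to-clear as many as possible */

	eir_stuck = reg_read(SKETCH_EIR);	/* whatever survives is "stuck" */
	if (!eir_stuck)
		return 0;

	/*
	 * Toggle every EMR bit so the ISR master error bit sees an edge even
	 * though EIR could not be fully cleared, then leave the stuck bits
	 * masked so they stop re-raising the interrupt until the underlying
	 * error (or a GPU reset) goes away.
	 */
	emr = reg_read(SKETCH_EMR);
	reg_write(SKETCH_EMR, 0xffffffff);
	reg_write(SKETCH_EMR, emr | eir_stuck);

	return eir_stuck;
}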
4528a266c7d5SChris Wilson 4529a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 4530a266c7d5SChris Wilson { 4531fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 453238bde180SChris Wilson u32 enable_mask; 4533a266c7d5SChris Wilson 4534045cebd2SVille Syrjälä I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4535045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 453638bde180SChris Wilson 453738bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 453838bde180SChris Wilson dev_priv->irq_mask = 453938bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 454038bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 454116659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 454216659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 454338bde180SChris Wilson 454438bde180SChris Wilson enable_mask = 454538bde180SChris Wilson I915_ASLE_INTERRUPT | 454638bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 454738bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 454816659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 454938bde180SChris Wilson I915_USER_INTERRUPT; 455038bde180SChris Wilson 455156b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 4552a266c7d5SChris Wilson /* Enable in IER... */ 4553a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4554a266c7d5SChris Wilson /* and unmask in IMR */ 4555a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4556a266c7d5SChris Wilson } 4557a266c7d5SChris Wilson 4558ba7eb789SVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4559a266c7d5SChris Wilson 4560379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4561379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4562d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4563755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4564755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4565d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4566379ef82dSDaniel Vetter 4567c30bb1fdSVille Syrjälä i915_enable_asle_pipestat(dev_priv); 4568c30bb1fdSVille Syrjälä 456920afbda2SDaniel Vetter return 0; 457020afbda2SDaniel Vetter } 457120afbda2SDaniel Vetter 4572ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 4573a266c7d5SChris Wilson { 457445a83f84SDaniel Vetter struct drm_device *dev = arg; 4575fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4576af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4577a266c7d5SChris Wilson 45782dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 45792dd2a883SImre Deak return IRQ_NONE; 45802dd2a883SImre Deak 45811f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 45821f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 45831f814dacSImre Deak 458438bde180SChris Wilson do { 4585eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 458678c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4587af722d28SVille Syrjälä u32 hotplug_status = 0; 4588af722d28SVille Syrjälä u32 iir; 4589a266c7d5SChris Wilson 4590af722d28SVille Syrjälä iir = I915_READ(IIR); 4591af722d28SVille Syrjälä if (iir == 0) 4592af722d28SVille Syrjälä break; 4593af722d28SVille Syrjälä 4594af722d28SVille Syrjälä ret = IRQ_HANDLED; 4595af722d28SVille Syrjälä 4596af722d28SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv) && 4597af722d28SVille Syrjälä iir & I915_DISPLAY_PORT_INTERRUPT) 4598af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4599a266c7d5SChris Wilson 4600eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4601eb64343cSVille Syrjälä * signalled in iir */ 4602eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4603a266c7d5SChris Wilson 460478c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 460578c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 460678c357ddSVille Syrjälä 4607fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4608a266c7d5SChris Wilson 4609a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 46103b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4611a266c7d5SChris Wilson 461278c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 461378c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4614a266c7d5SChris Wilson 4615af722d28SVille Syrjälä if (hotplug_status) 4616af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4617af722d28SVille Syrjälä 4618af722d28SVille Syrjälä i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4619af722d28SVille Syrjälä } while (0); 4620a266c7d5SChris Wilson 46211f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 46221f814dacSImre Deak 4623a266c7d5SChris Wilson return ret; 4624a266c7d5SChris Wilson } 4625a266c7d5SChris Wilson 46266bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev) 4627a266c7d5SChris Wilson { 4628fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4629a266c7d5SChris Wilson 46300706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4631a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4632a266c7d5SChris Wilson 463344d9241eSVille Syrjälä 
i9xx_pipestat_irq_reset(dev_priv); 463444d9241eSVille Syrjälä 4635ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4636a266c7d5SChris Wilson } 4637a266c7d5SChris Wilson 4638a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 4639a266c7d5SChris Wilson { 4640fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4641bbba0a97SChris Wilson u32 enable_mask; 4642a266c7d5SChris Wilson u32 error_mask; 4643a266c7d5SChris Wilson 4644045cebd2SVille Syrjälä /* 4645045cebd2SVille Syrjälä * Enable some error detection, note the instruction error mask 4646045cebd2SVille Syrjälä * bit is reserved, so we leave it masked. 4647045cebd2SVille Syrjälä */ 4648045cebd2SVille Syrjälä if (IS_G4X(dev_priv)) { 4649045cebd2SVille Syrjälä error_mask = ~(GM45_ERROR_PAGE_TABLE | 4650045cebd2SVille Syrjälä GM45_ERROR_MEM_PRIV | 4651045cebd2SVille Syrjälä GM45_ERROR_CP_PRIV | 4652045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4653045cebd2SVille Syrjälä } else { 4654045cebd2SVille Syrjälä error_mask = ~(I915_ERROR_PAGE_TABLE | 4655045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4656045cebd2SVille Syrjälä } 4657045cebd2SVille Syrjälä I915_WRITE(EMR, error_mask); 4658045cebd2SVille Syrjälä 4659a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. */ 4660c30bb1fdSVille Syrjälä dev_priv->irq_mask = 4661c30bb1fdSVille Syrjälä ~(I915_ASLE_INTERRUPT | 4662adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 4663bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4664bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 466578c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4666bbba0a97SChris Wilson 4667c30bb1fdSVille Syrjälä enable_mask = 4668c30bb1fdSVille Syrjälä I915_ASLE_INTERRUPT | 4669c30bb1fdSVille Syrjälä I915_DISPLAY_PORT_INTERRUPT | 4670c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4671c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 467278c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4673c30bb1fdSVille Syrjälä I915_USER_INTERRUPT; 4674bbba0a97SChris Wilson 467591d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4676bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 4677a266c7d5SChris Wilson 4678c30bb1fdSVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4679c30bb1fdSVille Syrjälä 4680b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4681b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4682d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4683755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4684755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4685755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4686d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4687a266c7d5SChris Wilson 468891d14251STvrtko Ursulin i915_enable_asle_pipestat(dev_priv); 468920afbda2SDaniel Vetter 469020afbda2SDaniel Vetter return 0; 469120afbda2SDaniel Vetter } 469220afbda2SDaniel Vetter 469391d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 469420afbda2SDaniel Vetter { 469520afbda2SDaniel Vetter u32 hotplug_en; 469620afbda2SDaniel Vetter 469767520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4698b5ea2d56SDaniel Vetter 4699adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 4700e5868a31SEgbert Eich /* enable bits are the same for all generations */ 470191d14251STvrtko Ursulin hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4702a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 4703a266c7d5SChris Wilson to generate a spurious hotplug event about three 4704a266c7d5SChris Wilson seconds later. So just do it once. 4705a266c7d5SChris Wilson */ 470691d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4707a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4708a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4709a266c7d5SChris Wilson 4710a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 47110706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(dev_priv, 4712f9e3dc78SJani Nikula HOTPLUG_INT_EN_MASK | 4713f9e3dc78SJani Nikula CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4714f9e3dc78SJani Nikula CRT_HOTPLUG_ACTIVATION_PERIOD_64, 47150706f17cSEgbert Eich hotplug_en); 4716a266c7d5SChris Wilson } 4717a266c7d5SChris Wilson 4718ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 4719a266c7d5SChris Wilson { 472045a83f84SDaniel Vetter struct drm_device *dev = arg; 4721fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4722af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4723a266c7d5SChris Wilson 47242dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 47252dd2a883SImre Deak return IRQ_NONE; 47262dd2a883SImre Deak 47271f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 47281f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 47291f814dacSImre Deak 4730af722d28SVille Syrjälä do { 4731eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 473278c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4733af722d28SVille Syrjälä u32 hotplug_status = 0; 4734af722d28SVille Syrjälä u32 iir; 47352c8ba29fSChris Wilson 4736af722d28SVille Syrjälä iir = I915_READ(IIR); 4737af722d28SVille Syrjälä if (iir == 0) 4738af722d28SVille Syrjälä break; 4739af722d28SVille Syrjälä 4740af722d28SVille Syrjälä ret = IRQ_HANDLED; 4741af722d28SVille Syrjälä 4742af722d28SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 4743af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4744a266c7d5SChris Wilson 4745eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4746eb64343cSVille Syrjälä * signalled in iir */ 4747eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4748a266c7d5SChris Wilson 474978c357ddSVille Syrjälä if (iir & 
I915_MASTER_ERROR_INTERRUPT) 475078c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 475178c357ddSVille Syrjälä 4752fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4753a266c7d5SChris Wilson 4754a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 47553b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4756af722d28SVille Syrjälä 4757a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 47583b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 4759a266c7d5SChris Wilson 476078c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 476178c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4762515ac2bbSDaniel Vetter 4763af722d28SVille Syrjälä if (hotplug_status) 4764af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4765af722d28SVille Syrjälä 4766af722d28SVille Syrjälä i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4767af722d28SVille Syrjälä } while (0); 4768a266c7d5SChris Wilson 47691f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 47701f814dacSImre Deak 4771a266c7d5SChris Wilson return ret; 4772a266c7d5SChris Wilson } 4773a266c7d5SChris Wilson 4774fca52a55SDaniel Vetter /** 4775fca52a55SDaniel Vetter * intel_irq_init - initializes irq support 4776fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4777fca52a55SDaniel Vetter * 4778fca52a55SDaniel Vetter * This function initializes all the irq support including work items, timers 4779fca52a55SDaniel Vetter * and all the vtables. It does not setup the interrupt itself though. 4780fca52a55SDaniel Vetter */ 4781b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv) 4782f71d4af4SJesse Barnes { 478391c8a326SChris Wilson struct drm_device *dev = &dev_priv->drm; 4784562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 4785cefcff8fSJoonas Lahtinen int i; 47868b2e326dSChris Wilson 478777913b39SJani Nikula intel_hpd_init_work(dev_priv); 478877913b39SJani Nikula 4789562d9baeSSagar Arun Kamble INIT_WORK(&rps->work, gen6_pm_rps_work); 4790cefcff8fSJoonas Lahtinen 4791a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4792cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4793cefcff8fSJoonas Lahtinen dev_priv->l3_parity.remap_info[i] = NULL; 47948b2e326dSChris Wilson 47954805fe82STvrtko Ursulin if (HAS_GUC_SCHED(dev_priv)) 479626705e20SSagar Arun Kamble dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 479726705e20SSagar Arun Kamble 4798a6706b45SDeepak S /* Let's track the enabled rps events */ 4799666a4537SWayne Boyer if (IS_VALLEYVIEW(dev_priv)) 48006c65a587SVille Syrjälä /* WaGsvRC0ResidencyMethod:vlv */ 4801e0e8c7cbSChris Wilson dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 480231685c25SDeepak S else 48034668f695SChris Wilson dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | 48044668f695SChris Wilson GEN6_PM_RP_DOWN_THRESHOLD | 48054668f695SChris Wilson GEN6_PM_RP_DOWN_TIMEOUT); 4806a6706b45SDeepak S 4807562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz = 0; 48081800ad25SSagar Arun Kamble 48091800ad25SSagar Arun Kamble /* 4810acf2dc22SMika Kuoppala * SNB,IVB,HSW can hang, while VLV,CHV may hard hang, on a looping batchbuffer 48111800ad25SSagar Arun Kamble * if GEN6_PM_UP_EI_EXPIRED is masked. 48121800ad25SSagar Arun Kamble * 48131800ad25SSagar Arun Kamble * TODO: verify if this can be reproduced on VLV,CHV.
48141800ad25SSagar Arun Kamble */ 4815bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) <= 7) 4816562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 48171800ad25SSagar Arun Kamble 4818bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8) 4819562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 48201800ad25SSagar Arun Kamble 4821cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2)) { 48224194c088SRodrigo Vivi /* Gen2 doesn't have a hardware frame counter */ 48234cdb83ecSVille Syrjälä dev->max_vblank_count = 0; 4824bca2bf2aSPandiyan, Dhinakaran } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 4825f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4826fd8f507cSVille Syrjälä dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4827391f75e2SVille Syrjälä } else { 4828391f75e2SVille Syrjälä dev->driver->get_vblank_counter = i915_get_vblank_counter; 4829391f75e2SVille Syrjälä dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4830f71d4af4SJesse Barnes } 4831f71d4af4SJesse Barnes 483221da2700SVille Syrjälä /* 483321da2700SVille Syrjälä * Opt out of the vblank disable timer on everything except gen2. 483421da2700SVille Syrjälä * Gen2 doesn't have a hardware frame counter and so depends on 483521da2700SVille Syrjälä * vblank interrupts to produce sane vblank sequence numbers. 483621da2700SVille Syrjälä */ 4837cf819effSLucas De Marchi if (!IS_GEN(dev_priv, 2)) 483821da2700SVille Syrjälä dev->vblank_disable_immediate = true; 483921da2700SVille Syrjälä 4840262fd485SChris Wilson /* Most platforms treat the display irq block as an always-on 4841262fd485SChris Wilson * power domain. vlv/chv can disable it at runtime and need 4842262fd485SChris Wilson * special care to avoid writing any of the display block registers 4843262fd485SChris Wilson * outside of the power domain. We defer setting up the display irqs 4844262fd485SChris Wilson * in this case to the runtime pm. 4845262fd485SChris Wilson */ 4846262fd485SChris Wilson dev_priv->display_irqs_enabled = true; 4847262fd485SChris Wilson if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4848262fd485SChris Wilson dev_priv->display_irqs_enabled = false; 4849262fd485SChris Wilson 4850317eaa95SLyude dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 48519a64c650SLyude Paul /* If we have MST support, we want to avoid doing short HPD IRQ storm 48529a64c650SLyude Paul * detection, as short HPD storms will occur as a natural part of 48539a64c650SLyude Paul * sideband messaging with MST. 48549a64c650SLyude Paul * On older platforms however, IRQ storms can occur with both long and 48559a64c650SLyude Paul * short pulses, as seen on some G4x systems.
48569a64c650SLyude Paul */ 48579a64c650SLyude Paul dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); 4858317eaa95SLyude 48591bf6ad62SDaniel Vetter dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4860f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4861f71d4af4SJesse Barnes 4862b963291cSDaniel Vetter if (IS_CHERRYVIEW(dev_priv)) { 486343f328d7SVille Syrjälä dev->driver->irq_handler = cherryview_irq_handler; 48646bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = cherryview_irq_reset; 486543f328d7SVille Syrjälä dev->driver->irq_postinstall = cherryview_irq_postinstall; 48666bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = cherryview_irq_reset; 486786e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 486886e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 486943f328d7SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4870b963291cSDaniel Vetter } else if (IS_VALLEYVIEW(dev_priv)) { 48717e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 48726bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = valleyview_irq_reset; 48737e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 48746bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = valleyview_irq_reset; 487586e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 487686e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4877fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 487851951ae7SMika Kuoppala } else if (INTEL_GEN(dev_priv) >= 11) { 487951951ae7SMika Kuoppala dev->driver->irq_handler = gen11_irq_handler; 488051951ae7SMika Kuoppala dev->driver->irq_preinstall = gen11_irq_reset; 488151951ae7SMika Kuoppala dev->driver->irq_postinstall = gen11_irq_postinstall; 488251951ae7SMika Kuoppala dev->driver->irq_uninstall = gen11_irq_reset; 488351951ae7SMika Kuoppala dev->driver->enable_vblank = gen8_enable_vblank; 488451951ae7SMika Kuoppala dev->driver->disable_vblank = gen8_disable_vblank; 4885121e758eSDhinakaran Pandiyan dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4886bca2bf2aSPandiyan, Dhinakaran } else if (INTEL_GEN(dev_priv) >= 8) { 4887abd58f01SBen Widawsky dev->driver->irq_handler = gen8_irq_handler; 4888723761b8SDaniel Vetter dev->driver->irq_preinstall = gen8_irq_reset; 4889abd58f01SBen Widawsky dev->driver->irq_postinstall = gen8_irq_postinstall; 48906bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = gen8_irq_reset; 4891abd58f01SBen Widawsky dev->driver->enable_vblank = gen8_enable_vblank; 4892abd58f01SBen Widawsky dev->driver->disable_vblank = gen8_disable_vblank; 4893cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4894e0a20ad7SShashank Sharma dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 48957b22b8c4SRodrigo Vivi else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 48967b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 48976dbf30ceSVille Syrjälä dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 48986dbf30ceSVille Syrjälä else 48993a3b3c7dSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 49006e266956STvrtko Ursulin } else if (HAS_PCH_SPLIT(dev_priv)) { 4901f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 4902723761b8SDaniel Vetter dev->driver->irq_preinstall = ironlake_irq_reset; 4903f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 49046bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = 
ironlake_irq_reset; 4905f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 4906f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 4907e4ce95aaSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4908f71d4af4SJesse Barnes } else { 4909cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2)) { 49106bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i8xx_irq_reset; 4911c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 4912c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 49136bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i8xx_irq_reset; 491486e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 491586e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 4916cf819effSLucas De Marchi } else if (IS_GEN(dev_priv, 3)) { 49176bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i915_irq_reset; 4918a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 49196bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i915_irq_reset; 4920a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 492186e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 492286e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 4923c2798b19SChris Wilson } else { 49246bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i965_irq_reset; 4925a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 49266bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i965_irq_reset; 4927a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 492886e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 492986e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4930c2798b19SChris Wilson } 4931778eb334SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv)) 4932778eb334SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4933f71d4af4SJesse Barnes } 4934f71d4af4SJesse Barnes } 493520afbda2SDaniel Vetter 4936fca52a55SDaniel Vetter /** 4937cefcff8fSJoonas Lahtinen * intel_irq_fini - deinitializes IRQ support 4938cefcff8fSJoonas Lahtinen * @i915: i915 device instance 4939cefcff8fSJoonas Lahtinen * 4940cefcff8fSJoonas Lahtinen * This function deinitializes all the IRQ support. 4941cefcff8fSJoonas Lahtinen */ 4942cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915) 4943cefcff8fSJoonas Lahtinen { 4944cefcff8fSJoonas Lahtinen int i; 4945cefcff8fSJoonas Lahtinen 4946cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4947cefcff8fSJoonas Lahtinen kfree(i915->l3_parity.remap_info[i]); 4948cefcff8fSJoonas Lahtinen } 4949cefcff8fSJoonas Lahtinen 4950cefcff8fSJoonas Lahtinen /** 4951fca52a55SDaniel Vetter * intel_irq_install - enables the hardware interrupt 4952fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4953fca52a55SDaniel Vetter * 4954fca52a55SDaniel Vetter * This function enables the hardware interrupt handling, but leaves the hotplug 4955fca52a55SDaniel Vetter * handling still disabled. It is called after intel_irq_init(). 4956fca52a55SDaniel Vetter * 4957fca52a55SDaniel Vetter * In the driver load and resume code we need working interrupts in a few places 4958fca52a55SDaniel Vetter * but don't want to deal with the hassle of concurrent probe and hotplug 4959fca52a55SDaniel Vetter * workers. Hence the split into this two-stage approach. 
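 *
 * As an editorial illustration only (not a verbatim copy of the driver load
 * path; error handling and unrelated setup omitted), the intended ordering
 * is roughly:
 *
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *	intel_hpd_init(dev_priv);
 *
 * i.e. vtables and work items first, then the actual interrupt line, and
 * only after that is the hotplug machinery armed.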
4960fca52a55SDaniel Vetter */ 49612aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv) 49622aeb7d3aSDaniel Vetter { 49632aeb7d3aSDaniel Vetter /* 49642aeb7d3aSDaniel Vetter * We enable some interrupt sources in our postinstall hooks, so mark 49652aeb7d3aSDaniel Vetter * interrupts as enabled _before_ actually enabling them to avoid 49662aeb7d3aSDaniel Vetter * special cases in our ordering checks. 49672aeb7d3aSDaniel Vetter */ 4968ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true; 49692aeb7d3aSDaniel Vetter 497091c8a326SChris Wilson return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 49712aeb7d3aSDaniel Vetter } 49722aeb7d3aSDaniel Vetter 4973fca52a55SDaniel Vetter /** 4974fca52a55SDaniel Vetter * intel_irq_uninstall - finalizes all irq handling 4975fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4976fca52a55SDaniel Vetter * 4977fca52a55SDaniel Vetter * This stops interrupt and hotplug handling and unregisters and frees all 4978fca52a55SDaniel Vetter * resources acquired in the init functions. 4979fca52a55SDaniel Vetter */ 49802aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv) 49812aeb7d3aSDaniel Vetter { 498291c8a326SChris Wilson drm_irq_uninstall(&dev_priv->drm); 49832aeb7d3aSDaniel Vetter intel_hpd_cancel_work(dev_priv); 4984ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false; 49852aeb7d3aSDaniel Vetter } 49862aeb7d3aSDaniel Vetter 4987fca52a55SDaniel Vetter /** 4988fca52a55SDaniel Vetter * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4989fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4990fca52a55SDaniel Vetter * 4991fca52a55SDaniel Vetter * This function is used to disable interrupts at runtime, both in the runtime 4992fca52a55SDaniel Vetter * pm and the system suspend/resume code. 4993fca52a55SDaniel Vetter */ 4994b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4995c67a470bSPaulo Zanoni { 499691c8a326SChris Wilson dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4997ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false; 499891c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 4999c67a470bSPaulo Zanoni } 5000c67a470bSPaulo Zanoni 5001fca52a55SDaniel Vetter /** 5002fca52a55SDaniel Vetter * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 5003fca52a55SDaniel Vetter * @dev_priv: i915 device instance 5004fca52a55SDaniel Vetter * 5005fca52a55SDaniel Vetter * This function is used to enable interrupts at runtime, both in the runtime 5006fca52a55SDaniel Vetter * pm and the system suspend/resume code. 5007fca52a55SDaniel Vetter */ 5008b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 5009c67a470bSPaulo Zanoni { 5010ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true; 501191c8a326SChris Wilson dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 501291c8a326SChris Wilson dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 5013c67a470bSPaulo Zanoni } 5014
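/*
 * Editorial sketch of how the two helpers above are meant to bracket a
 * suspend/resume style transition. The sketch_* function names exist only
 * for this example and deliberately leave out everything else (GEM, display,
 * power wells) a real suspend/resume path has to do.
 */
static void sketch_suspend_irqs(struct drm_i915_private *dev_priv)
{
	/*
	 * Runs the ->irq_uninstall() hook, marks interrupts as disabled and
	 * waits for any handler still in flight, so nothing touches the
	 * hardware after this point.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
}

static void sketch_resume_irqs(struct drm_i915_private *dev_priv)
{
	/*
	 * Re-runs the ->irq_preinstall()/->irq_postinstall() hooks to
	 * reprogram IMR/IER from scratch; hotplug detection is typically
	 * re-armed separately once interrupts are back.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);
}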