1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2c0e09200SDave Airlie */ 3c0e09200SDave Airlie /* 4c0e09200SDave Airlie * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5c0e09200SDave Airlie * All Rights Reserved. 6c0e09200SDave Airlie * 7c0e09200SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining a 8c0e09200SDave Airlie * copy of this software and associated documentation files (the 9c0e09200SDave Airlie * "Software"), to deal in the Software without restriction, including 10c0e09200SDave Airlie * without limitation the rights to use, copy, modify, merge, publish, 11c0e09200SDave Airlie * distribute, sub license, and/or sell copies of the Software, and to 12c0e09200SDave Airlie * permit persons to whom the Software is furnished to do so, subject to 13c0e09200SDave Airlie * the following conditions: 14c0e09200SDave Airlie * 15c0e09200SDave Airlie * The above copyright notice and this permission notice (including the 16c0e09200SDave Airlie * next paragraph) shall be included in all copies or substantial portions 17c0e09200SDave Airlie * of the Software. 18c0e09200SDave Airlie * 19c0e09200SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20c0e09200SDave Airlie * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21c0e09200SDave Airlie * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22c0e09200SDave Airlie * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23c0e09200SDave Airlie * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24c0e09200SDave Airlie * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25c0e09200SDave Airlie * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26c0e09200SDave Airlie * 27c0e09200SDave Airlie */ 28c0e09200SDave Airlie 29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30a70491ccSJoe Perches 3163eeaf38SJesse Barnes #include <linux/sysrq.h> 325a0e3ad6STejun Heo #include <linux/slab.h> 33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h> 34760285e7SDavid Howells #include <drm/drmP.h> 35760285e7SDavid Howells #include <drm/i915_drm.h> 36c0e09200SDave Airlie #include "i915_drv.h" 371c5d22f7SChris Wilson #include "i915_trace.h" 3879e53945SJesse Barnes #include "intel_drv.h" 39c0e09200SDave Airlie 40fca52a55SDaniel Vetter /** 41fca52a55SDaniel Vetter * DOC: interrupt handling 42fca52a55SDaniel Vetter * 43fca52a55SDaniel Vetter * These functions provide the basic support for enabling and disabling the 44fca52a55SDaniel Vetter * interrupt handling support. There's a lot more functionality in i915_irq.c 45fca52a55SDaniel Vetter * and related files, but that will be described in separate chapters. 
46fca52a55SDaniel Vetter */ 47fca52a55SDaniel Vetter 48e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = { 49e4ce95aaSVille Syrjälä [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50e4ce95aaSVille Syrjälä }; 51e4ce95aaSVille Syrjälä 5223bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = { 5323bb4cb5SVille Syrjälä [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 5423bb4cb5SVille Syrjälä }; 5523bb4cb5SVille Syrjälä 563a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = { 573a3b3c7dSVille Syrjälä [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 583a3b3c7dSVille Syrjälä }; 593a3b3c7dSVille Syrjälä 607c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = { 61e5868a31SEgbert Eich [HPD_CRT] = SDE_CRT_HOTPLUG, 62e5868a31SEgbert Eich [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63e5868a31SEgbert Eich [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64e5868a31SEgbert Eich [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65e5868a31SEgbert Eich [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66e5868a31SEgbert Eich }; 67e5868a31SEgbert Eich 687c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = { 69e5868a31SEgbert Eich [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 7073c352a2SDaniel Vetter [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71e5868a31SEgbert Eich [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72e5868a31SEgbert Eich [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73e5868a31SEgbert Eich [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74e5868a31SEgbert Eich }; 75e5868a31SEgbert Eich 7626951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = { 7774c0b395SVille Syrjälä [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 7826951cafSXiong Zhang [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 7926951cafSXiong Zhang [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 8026951cafSXiong Zhang [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 8126951cafSXiong Zhang [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 8226951cafSXiong Zhang }; 8326951cafSXiong Zhang 847c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91e5868a31SEgbert Eich }; 92e5868a31SEgbert Eich 937c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100e5868a31SEgbert Eich }; 101e5868a31SEgbert Eich 1024bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109e5868a31SEgbert Eich }; 110e5868a31SEgbert Eich 111e0a20ad7SShashank Sharma /* BXT hpd list */ 112e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = { 1137f3561beSSonika Jindal [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114e0a20ad7SShashank Sharma 
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115e0a20ad7SShashank Sharma [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116e0a20ad7SShashank Sharma }; 117e0a20ad7SShashank Sharma 118b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = { 119b796b971SDhinakaran Pandiyan [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 120b796b971SDhinakaran Pandiyan [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 121b796b971SDhinakaran Pandiyan [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 122b796b971SDhinakaran Pandiyan [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 123121e758eSDhinakaran Pandiyan }; 124121e758eSDhinakaran Pandiyan 12531604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = { 12631604222SAnusha Srivatsa [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 12731604222SAnusha Srivatsa [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 12831604222SAnusha Srivatsa [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 12931604222SAnusha Srivatsa [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 13031604222SAnusha Srivatsa [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 13131604222SAnusha Srivatsa [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 13231604222SAnusha Srivatsa }; 13331604222SAnusha Srivatsa 1345c502442SPaulo Zanoni /* IIR can theoretically queue up two events. Be paranoid. */ 135f86f3fb0SPaulo Zanoni #define GEN8_IRQ_RESET_NDX(type, which) do { \ 1365c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 1375c502442SPaulo Zanoni POSTING_READ(GEN8_##type##_IMR(which)); \ 1385c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IER(which), 0); \ 1395c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 1405c502442SPaulo Zanoni POSTING_READ(GEN8_##type##_IIR(which)); \ 1415c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 1425c502442SPaulo Zanoni POSTING_READ(GEN8_##type##_IIR(which)); \ 1435c502442SPaulo Zanoni } while (0) 1445c502442SPaulo Zanoni 1453488d4ebSVille Syrjälä #define GEN3_IRQ_RESET(type) do { \ 146a9d356a6SPaulo Zanoni I915_WRITE(type##IMR, 0xffffffff); \ 1475c502442SPaulo Zanoni POSTING_READ(type##IMR); \ 148a9d356a6SPaulo Zanoni I915_WRITE(type##IER, 0); \ 1495c502442SPaulo Zanoni I915_WRITE(type##IIR, 0xffffffff); \ 1505c502442SPaulo Zanoni POSTING_READ(type##IIR); \ 1515c502442SPaulo Zanoni I915_WRITE(type##IIR, 0xffffffff); \ 1525c502442SPaulo Zanoni POSTING_READ(type##IIR); \ 153a9d356a6SPaulo Zanoni } while (0) 154a9d356a6SPaulo Zanoni 155e9e9848aSVille Syrjälä #define GEN2_IRQ_RESET(type) do { \ 156e9e9848aSVille Syrjälä I915_WRITE16(type##IMR, 0xffff); \ 157e9e9848aSVille Syrjälä POSTING_READ16(type##IMR); \ 158e9e9848aSVille Syrjälä I915_WRITE16(type##IER, 0); \ 159e9e9848aSVille Syrjälä I915_WRITE16(type##IIR, 0xffff); \ 160e9e9848aSVille Syrjälä POSTING_READ16(type##IIR); \ 161e9e9848aSVille Syrjälä I915_WRITE16(type##IIR, 0xffff); \ 162e9e9848aSVille Syrjälä POSTING_READ16(type##IIR); \ 163e9e9848aSVille Syrjälä } while (0) 164e9e9848aSVille Syrjälä 165337ba017SPaulo Zanoni /* 166337ba017SPaulo Zanoni * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
167337ba017SPaulo Zanoni */ 1683488d4ebSVille Syrjälä static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, 169f0f59a00SVille Syrjälä i915_reg_t reg) 170b51a2842SVille Syrjälä { 171b51a2842SVille Syrjälä u32 val = I915_READ(reg); 172b51a2842SVille Syrjälä 173b51a2842SVille Syrjälä if (val == 0) 174b51a2842SVille Syrjälä return; 175b51a2842SVille Syrjälä 176b51a2842SVille Syrjälä WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 177f0f59a00SVille Syrjälä i915_mmio_reg_offset(reg), val); 178b51a2842SVille Syrjälä I915_WRITE(reg, 0xffffffff); 179b51a2842SVille Syrjälä POSTING_READ(reg); 180b51a2842SVille Syrjälä I915_WRITE(reg, 0xffffffff); 181b51a2842SVille Syrjälä POSTING_READ(reg); 182b51a2842SVille Syrjälä } 183337ba017SPaulo Zanoni 184e9e9848aSVille Syrjälä static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, 185e9e9848aSVille Syrjälä i915_reg_t reg) 186e9e9848aSVille Syrjälä { 187e9e9848aSVille Syrjälä u16 val = I915_READ16(reg); 188e9e9848aSVille Syrjälä 189e9e9848aSVille Syrjälä if (val == 0) 190e9e9848aSVille Syrjälä return; 191e9e9848aSVille Syrjälä 192e9e9848aSVille Syrjälä WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 193e9e9848aSVille Syrjälä i915_mmio_reg_offset(reg), val); 194e9e9848aSVille Syrjälä I915_WRITE16(reg, 0xffff); 195e9e9848aSVille Syrjälä POSTING_READ16(reg); 196e9e9848aSVille Syrjälä I915_WRITE16(reg, 0xffff); 197e9e9848aSVille Syrjälä POSTING_READ16(reg); 198e9e9848aSVille Syrjälä } 199e9e9848aSVille Syrjälä 20035079899SPaulo Zanoni #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 2013488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 20235079899SPaulo Zanoni I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 2037d1bd539SVille Syrjälä I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 2047d1bd539SVille Syrjälä POSTING_READ(GEN8_##type##_IMR(which)); \ 20535079899SPaulo Zanoni } while (0) 20635079899SPaulo Zanoni 2073488d4ebSVille Syrjälä #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ 2083488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, type##IIR); \ 20935079899SPaulo Zanoni I915_WRITE(type##IER, (ier_val)); \ 2107d1bd539SVille Syrjälä I915_WRITE(type##IMR, (imr_val)); \ 2117d1bd539SVille Syrjälä POSTING_READ(type##IMR); \ 21235079899SPaulo Zanoni } while (0) 21335079899SPaulo Zanoni 214e9e9848aSVille Syrjälä #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ 215e9e9848aSVille Syrjälä gen2_assert_iir_is_zero(dev_priv, type##IIR); \ 216e9e9848aSVille Syrjälä I915_WRITE16(type##IER, (ier_val)); \ 217e9e9848aSVille Syrjälä I915_WRITE16(type##IMR, (imr_val)); \ 218e9e9848aSVille Syrjälä POSTING_READ16(type##IMR); \ 219e9e9848aSVille Syrjälä } while (0) 220e9e9848aSVille Syrjälä 221c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 22226705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 223c9a9a268SImre Deak 2240706f17cSEgbert Eich /* For display hotplug interrupt */ 2250706f17cSEgbert Eich static inline void 2260706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 2270706f17cSEgbert Eich uint32_t mask, 2280706f17cSEgbert Eich uint32_t bits) 2290706f17cSEgbert Eich { 2300706f17cSEgbert Eich uint32_t val; 2310706f17cSEgbert Eich 23267520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 2330706f17cSEgbert Eich WARN_ON(bits & ~mask); 2340706f17cSEgbert Eich 2350706f17cSEgbert Eich val = 
I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that concurrent read-modify-write
 * cycles don't interfere, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where
 * the lock is already held, it acquires the lock itself. A
 * non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared
		 * without first servicing the Selector & Shared IIR
		 * registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
28896606f3bSOscar Mateo */ 28996606f3bSOscar Mateo raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 29096606f3bSOscar Mateo 29196606f3bSOscar Mateo return true; 29296606f3bSOscar Mateo } 29396606f3bSOscar Mateo 29496606f3bSOscar Mateo return false; 29596606f3bSOscar Mateo } 29696606f3bSOscar Mateo 297d9dc34f1SVille Syrjälä /** 298d9dc34f1SVille Syrjälä * ilk_update_display_irq - update DEIMR 299d9dc34f1SVille Syrjälä * @dev_priv: driver private 300d9dc34f1SVille Syrjälä * @interrupt_mask: mask of interrupt bits to update 301d9dc34f1SVille Syrjälä * @enabled_irq_mask: mask of interrupt bits to enable 302d9dc34f1SVille Syrjälä */ 303fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv, 304d9dc34f1SVille Syrjälä uint32_t interrupt_mask, 305d9dc34f1SVille Syrjälä uint32_t enabled_irq_mask) 306036a4a7dSZhenyu Wang { 307d9dc34f1SVille Syrjälä uint32_t new_val; 308d9dc34f1SVille Syrjälä 30967520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 3104bc9d430SDaniel Vetter 311d9dc34f1SVille Syrjälä WARN_ON(enabled_irq_mask & ~interrupt_mask); 312d9dc34f1SVille Syrjälä 3139df7575fSJesse Barnes if (WARN_ON(!intel_irqs_enabled(dev_priv))) 314c67a470bSPaulo Zanoni return; 315c67a470bSPaulo Zanoni 316d9dc34f1SVille Syrjälä new_val = dev_priv->irq_mask; 317d9dc34f1SVille Syrjälä new_val &= ~interrupt_mask; 318d9dc34f1SVille Syrjälä new_val |= (~enabled_irq_mask & interrupt_mask); 319d9dc34f1SVille Syrjälä 320d9dc34f1SVille Syrjälä if (new_val != dev_priv->irq_mask) { 321d9dc34f1SVille Syrjälä dev_priv->irq_mask = new_val; 3221ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 3233143a2bfSChris Wilson POSTING_READ(DEIMR); 324036a4a7dSZhenyu Wang } 325036a4a7dSZhenyu Wang } 326036a4a7dSZhenyu Wang 32743eaea13SPaulo Zanoni /** 32843eaea13SPaulo Zanoni * ilk_update_gt_irq - update GTIMR 32943eaea13SPaulo Zanoni * @dev_priv: driver private 33043eaea13SPaulo Zanoni * @interrupt_mask: mask of interrupt bits to update 33143eaea13SPaulo Zanoni * @enabled_irq_mask: mask of interrupt bits to enable 33243eaea13SPaulo Zanoni */ 33343eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 33443eaea13SPaulo Zanoni uint32_t interrupt_mask, 33543eaea13SPaulo Zanoni uint32_t enabled_irq_mask) 33643eaea13SPaulo Zanoni { 33767520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 33843eaea13SPaulo Zanoni 33915a17aaeSDaniel Vetter WARN_ON(enabled_irq_mask & ~interrupt_mask); 34015a17aaeSDaniel Vetter 3419df7575fSJesse Barnes if (WARN_ON(!intel_irqs_enabled(dev_priv))) 342c67a470bSPaulo Zanoni return; 343c67a470bSPaulo Zanoni 34443eaea13SPaulo Zanoni dev_priv->gt_irq_mask &= ~interrupt_mask; 34543eaea13SPaulo Zanoni dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 34643eaea13SPaulo Zanoni I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 34743eaea13SPaulo Zanoni } 34843eaea13SPaulo Zanoni 349480c8033SDaniel Vetter void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 35043eaea13SPaulo Zanoni { 35143eaea13SPaulo Zanoni ilk_update_gt_irq(dev_priv, mask, mask); 35231bb59ccSChris Wilson POSTING_READ_FW(GTIMR); 35343eaea13SPaulo Zanoni } 35443eaea13SPaulo Zanoni 355480c8033SDaniel Vetter void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 35643eaea13SPaulo Zanoni { 35743eaea13SPaulo Zanoni ilk_update_gt_irq(dev_priv, mask, 0); 35843eaea13SPaulo Zanoni } 35943eaea13SPaulo Zanoni 360f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 361b900b949SImre Deak { 
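	/*
	 * Note: gen11+ has no single PM IIR register; PM interrupts are
	 * reported through the banked GEN11_GT_INTR_DW + selector/shared
	 * IIR mechanism instead (see gen11_reset_one_iir() above), hence
	 * the warning below if this helper is reached on gen11+.
	 */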
362d02b98b8SOscar Mateo WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 363d02b98b8SOscar Mateo 364bca2bf2aSPandiyan, Dhinakaran return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 365b900b949SImre Deak } 366b900b949SImre Deak 367f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 368a72fbc3aSImre Deak { 369d02b98b8SOscar Mateo if (INTEL_GEN(dev_priv) >= 11) 370d02b98b8SOscar Mateo return GEN11_GPM_WGBOXPERF_INTR_MASK; 371d02b98b8SOscar Mateo else if (INTEL_GEN(dev_priv) >= 8) 372d02b98b8SOscar Mateo return GEN8_GT_IMR(2); 373d02b98b8SOscar Mateo else 374d02b98b8SOscar Mateo return GEN6_PMIMR; 375a72fbc3aSImre Deak } 376a72fbc3aSImre Deak 377f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 378b900b949SImre Deak { 379d02b98b8SOscar Mateo if (INTEL_GEN(dev_priv) >= 11) 380d02b98b8SOscar Mateo return GEN11_GPM_WGBOXPERF_INTR_ENABLE; 381d02b98b8SOscar Mateo else if (INTEL_GEN(dev_priv) >= 8) 382d02b98b8SOscar Mateo return GEN8_GT_IER(2); 383d02b98b8SOscar Mateo else 384d02b98b8SOscar Mateo return GEN6_PMIER; 385b900b949SImre Deak } 386b900b949SImre Deak 387edbfdb45SPaulo Zanoni /** 388edbfdb45SPaulo Zanoni * snb_update_pm_irq - update GEN6_PMIMR 389edbfdb45SPaulo Zanoni * @dev_priv: driver private 390edbfdb45SPaulo Zanoni * @interrupt_mask: mask of interrupt bits to update 391edbfdb45SPaulo Zanoni * @enabled_irq_mask: mask of interrupt bits to enable 392edbfdb45SPaulo Zanoni */ 393edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 394edbfdb45SPaulo Zanoni uint32_t interrupt_mask, 395edbfdb45SPaulo Zanoni uint32_t enabled_irq_mask) 396edbfdb45SPaulo Zanoni { 397605cd25bSPaulo Zanoni uint32_t new_val; 398edbfdb45SPaulo Zanoni 39915a17aaeSDaniel Vetter WARN_ON(enabled_irq_mask & ~interrupt_mask); 40015a17aaeSDaniel Vetter 40167520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 402edbfdb45SPaulo Zanoni 403f4e9af4fSAkash Goel new_val = dev_priv->pm_imr; 404f52ecbcfSPaulo Zanoni new_val &= ~interrupt_mask; 405f52ecbcfSPaulo Zanoni new_val |= (~enabled_irq_mask & interrupt_mask); 406f52ecbcfSPaulo Zanoni 407f4e9af4fSAkash Goel if (new_val != dev_priv->pm_imr) { 408f4e9af4fSAkash Goel dev_priv->pm_imr = new_val; 409f4e9af4fSAkash Goel I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 410a72fbc3aSImre Deak POSTING_READ(gen6_pm_imr(dev_priv)); 411edbfdb45SPaulo Zanoni } 412f52ecbcfSPaulo Zanoni } 413edbfdb45SPaulo Zanoni 414f4e9af4fSAkash Goel void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 415edbfdb45SPaulo Zanoni { 4169939fba2SImre Deak if (WARN_ON(!intel_irqs_enabled(dev_priv))) 4179939fba2SImre Deak return; 4189939fba2SImre Deak 419edbfdb45SPaulo Zanoni snb_update_pm_irq(dev_priv, mask, mask); 420edbfdb45SPaulo Zanoni } 421edbfdb45SPaulo Zanoni 422f4e9af4fSAkash Goel static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 4239939fba2SImre Deak { 4249939fba2SImre Deak snb_update_pm_irq(dev_priv, mask, 0); 4259939fba2SImre Deak } 4269939fba2SImre Deak 427f4e9af4fSAkash Goel void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 428edbfdb45SPaulo Zanoni { 4299939fba2SImre Deak if (WARN_ON(!intel_irqs_enabled(dev_priv))) 4309939fba2SImre Deak return; 4319939fba2SImre Deak 432f4e9af4fSAkash Goel __gen6_mask_pm_irq(dev_priv, mask); 433f4e9af4fSAkash Goel } 434f4e9af4fSAkash Goel 4353814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 436f4e9af4fSAkash Goel { 437f4e9af4fSAkash 
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
508b900b949SImre Deak { 509562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 510562d9baeSSagar Arun Kamble 511562d9baeSSagar Arun Kamble if (!READ_ONCE(rps->interrupts_enabled)) 512f2a91d1aSChris Wilson return; 513f2a91d1aSChris Wilson 514d4d70aa5SImre Deak spin_lock_irq(&dev_priv->irq_lock); 515562d9baeSSagar Arun Kamble rps->interrupts_enabled = false; 5169939fba2SImre Deak 517b20e3cfeSDave Gordon I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 5189939fba2SImre Deak 5194668f695SChris Wilson gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 52058072ccbSImre Deak 52158072ccbSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 52291c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 523c33d247dSChris Wilson 524c33d247dSChris Wilson /* Now that we will not be generating any more work, flush any 5253814fd77SOscar Mateo * outstanding tasks. As we are called on the RPS idle path, 526c33d247dSChris Wilson * we will reset the GPU to minimum frequencies, so the current 527c33d247dSChris Wilson * state of the worker can be discarded. 528c33d247dSChris Wilson */ 529562d9baeSSagar Arun Kamble cancel_work_sync(&rps->work); 530d02b98b8SOscar Mateo if (INTEL_GEN(dev_priv) >= 11) 531d02b98b8SOscar Mateo gen11_reset_rps_interrupts(dev_priv); 532d02b98b8SOscar Mateo else 533c33d247dSChris Wilson gen6_reset_rps_interrupts(dev_priv); 534b900b949SImre Deak } 535b900b949SImre Deak 53626705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 53726705e20SSagar Arun Kamble { 5381be333d3SSagar Arun Kamble assert_rpm_wakelock_held(dev_priv); 5391be333d3SSagar Arun Kamble 54026705e20SSagar Arun Kamble spin_lock_irq(&dev_priv->irq_lock); 54126705e20SSagar Arun Kamble gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 54226705e20SSagar Arun Kamble spin_unlock_irq(&dev_priv->irq_lock); 54326705e20SSagar Arun Kamble } 54426705e20SSagar Arun Kamble 54526705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 54626705e20SSagar Arun Kamble { 5471be333d3SSagar Arun Kamble assert_rpm_wakelock_held(dev_priv); 5481be333d3SSagar Arun Kamble 54926705e20SSagar Arun Kamble spin_lock_irq(&dev_priv->irq_lock); 55026705e20SSagar Arun Kamble if (!dev_priv->guc.interrupts_enabled) { 55126705e20SSagar Arun Kamble WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 55226705e20SSagar Arun Kamble dev_priv->pm_guc_events); 55326705e20SSagar Arun Kamble dev_priv->guc.interrupts_enabled = true; 55426705e20SSagar Arun Kamble gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 55526705e20SSagar Arun Kamble } 55626705e20SSagar Arun Kamble spin_unlock_irq(&dev_priv->irq_lock); 55726705e20SSagar Arun Kamble } 55826705e20SSagar Arun Kamble 55926705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 56026705e20SSagar Arun Kamble { 5611be333d3SSagar Arun Kamble assert_rpm_wakelock_held(dev_priv); 5621be333d3SSagar Arun Kamble 56326705e20SSagar Arun Kamble spin_lock_irq(&dev_priv->irq_lock); 56426705e20SSagar Arun Kamble dev_priv->guc.interrupts_enabled = false; 56526705e20SSagar Arun Kamble 56626705e20SSagar Arun Kamble gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 56726705e20SSagar Arun Kamble 56826705e20SSagar Arun Kamble spin_unlock_irq(&dev_priv->irq_lock); 56926705e20SSagar Arun Kamble synchronize_irq(dev_priv->drm.irq); 57026705e20SSagar Arun Kamble 57126705e20SSagar Arun Kamble gen9_reset_guc_interrupts(dev_priv); 57226705e20SSagar Arun Kamble } 57326705e20SSagar Arun Kamble 
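/*
 * Note on the ordering implemented by the helpers above: the enable paths
 * (gen6_enable_rps_interrupts(), gen9_enable_guc_interrupts()) assert that no
 * stale IIR bits are pending before unmasking under irq_lock. The disable
 * paths mask first, wait for any handler still running via synchronize_irq(),
 * and only then reset the IIR (the RPS path additionally cancels rps->work),
 * so deferred work never observes a stale pm_iir once interrupts are reported
 * as disabled.
 */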
5740961021aSBen Widawsky /** 5753a3b3c7dSVille Syrjälä * bdw_update_port_irq - update DE port interrupt 5763a3b3c7dSVille Syrjälä * @dev_priv: driver private 5773a3b3c7dSVille Syrjälä * @interrupt_mask: mask of interrupt bits to update 5783a3b3c7dSVille Syrjälä * @enabled_irq_mask: mask of interrupt bits to enable 5793a3b3c7dSVille Syrjälä */ 5803a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 5813a3b3c7dSVille Syrjälä uint32_t interrupt_mask, 5823a3b3c7dSVille Syrjälä uint32_t enabled_irq_mask) 5833a3b3c7dSVille Syrjälä { 5843a3b3c7dSVille Syrjälä uint32_t new_val; 5853a3b3c7dSVille Syrjälä uint32_t old_val; 5863a3b3c7dSVille Syrjälä 58767520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 5883a3b3c7dSVille Syrjälä 5893a3b3c7dSVille Syrjälä WARN_ON(enabled_irq_mask & ~interrupt_mask); 5903a3b3c7dSVille Syrjälä 5913a3b3c7dSVille Syrjälä if (WARN_ON(!intel_irqs_enabled(dev_priv))) 5923a3b3c7dSVille Syrjälä return; 5933a3b3c7dSVille Syrjälä 5943a3b3c7dSVille Syrjälä old_val = I915_READ(GEN8_DE_PORT_IMR); 5953a3b3c7dSVille Syrjälä 5963a3b3c7dSVille Syrjälä new_val = old_val; 5973a3b3c7dSVille Syrjälä new_val &= ~interrupt_mask; 5983a3b3c7dSVille Syrjälä new_val |= (~enabled_irq_mask & interrupt_mask); 5993a3b3c7dSVille Syrjälä 6003a3b3c7dSVille Syrjälä if (new_val != old_val) { 6013a3b3c7dSVille Syrjälä I915_WRITE(GEN8_DE_PORT_IMR, new_val); 6023a3b3c7dSVille Syrjälä POSTING_READ(GEN8_DE_PORT_IMR); 6033a3b3c7dSVille Syrjälä } 6043a3b3c7dSVille Syrjälä } 6053a3b3c7dSVille Syrjälä 6063a3b3c7dSVille Syrjälä /** 607013d3752SVille Syrjälä * bdw_update_pipe_irq - update DE pipe interrupt 608013d3752SVille Syrjälä * @dev_priv: driver private 609013d3752SVille Syrjälä * @pipe: pipe whose interrupt to update 610013d3752SVille Syrjälä * @interrupt_mask: mask of interrupt bits to update 611013d3752SVille Syrjälä * @enabled_irq_mask: mask of interrupt bits to enable 612013d3752SVille Syrjälä */ 613013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 614013d3752SVille Syrjälä enum pipe pipe, 615013d3752SVille Syrjälä uint32_t interrupt_mask, 616013d3752SVille Syrjälä uint32_t enabled_irq_mask) 617013d3752SVille Syrjälä { 618013d3752SVille Syrjälä uint32_t new_val; 619013d3752SVille Syrjälä 62067520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 621013d3752SVille Syrjälä 622013d3752SVille Syrjälä WARN_ON(enabled_irq_mask & ~interrupt_mask); 623013d3752SVille Syrjälä 624013d3752SVille Syrjälä if (WARN_ON(!intel_irqs_enabled(dev_priv))) 625013d3752SVille Syrjälä return; 626013d3752SVille Syrjälä 627013d3752SVille Syrjälä new_val = dev_priv->de_irq_mask[pipe]; 628013d3752SVille Syrjälä new_val &= ~interrupt_mask; 629013d3752SVille Syrjälä new_val |= (~enabled_irq_mask & interrupt_mask); 630013d3752SVille Syrjälä 631013d3752SVille Syrjälä if (new_val != dev_priv->de_irq_mask[pipe]) { 632013d3752SVille Syrjälä dev_priv->de_irq_mask[pipe] = new_val; 633013d3752SVille Syrjälä I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 634013d3752SVille Syrjälä POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 635013d3752SVille Syrjälä } 636013d3752SVille Syrjälä } 637013d3752SVille Syrjälä 638013d3752SVille Syrjälä /** 639fee884edSDaniel Vetter * ibx_display_interrupt_update - update SDEIMR 640fee884edSDaniel Vetter * @dev_priv: driver private 641fee884edSDaniel Vetter * @interrupt_mask: mask of interrupt bits to update 642fee884edSDaniel Vetter * @enabled_irq_mask: mask of interrupt bits to enable 643fee884edSDaniel Vetter */ 
64447339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 645fee884edSDaniel Vetter uint32_t interrupt_mask, 646fee884edSDaniel Vetter uint32_t enabled_irq_mask) 647fee884edSDaniel Vetter { 648fee884edSDaniel Vetter uint32_t sdeimr = I915_READ(SDEIMR); 649fee884edSDaniel Vetter sdeimr &= ~interrupt_mask; 650fee884edSDaniel Vetter sdeimr |= (~enabled_irq_mask & interrupt_mask); 651fee884edSDaniel Vetter 65215a17aaeSDaniel Vetter WARN_ON(enabled_irq_mask & ~interrupt_mask); 65315a17aaeSDaniel Vetter 65467520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 655fee884edSDaniel Vetter 6569df7575fSJesse Barnes if (WARN_ON(!intel_irqs_enabled(dev_priv))) 657c67a470bSPaulo Zanoni return; 658c67a470bSPaulo Zanoni 659fee884edSDaniel Vetter I915_WRITE(SDEIMR, sdeimr); 660fee884edSDaniel Vetter POSTING_READ(SDEIMR); 661fee884edSDaniel Vetter } 6628664281bSPaulo Zanoni 6636b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 6646b12ca56SVille Syrjälä enum pipe pipe) 6657c463586SKeith Packard { 6666b12ca56SVille Syrjälä u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 66710c59c51SImre Deak u32 enable_mask = status_mask << 16; 66810c59c51SImre Deak 6696b12ca56SVille Syrjälä lockdep_assert_held(&dev_priv->irq_lock); 6706b12ca56SVille Syrjälä 6716b12ca56SVille Syrjälä if (INTEL_GEN(dev_priv) < 5) 6726b12ca56SVille Syrjälä goto out; 6736b12ca56SVille Syrjälä 67410c59c51SImre Deak /* 675724a6905SVille Syrjälä * On pipe A we don't support the PSR interrupt yet, 676724a6905SVille Syrjälä * on pipe B and C the same bit MBZ. 67710c59c51SImre Deak */ 67810c59c51SImre Deak if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 67910c59c51SImre Deak return 0; 680724a6905SVille Syrjälä /* 681724a6905SVille Syrjälä * On pipe B and C we don't support the PSR interrupt yet, on pipe 682724a6905SVille Syrjälä * A the same bit is for perf counters which we don't use either. 
683724a6905SVille Syrjälä */ 684724a6905SVille Syrjälä if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 685724a6905SVille Syrjälä return 0; 68610c59c51SImre Deak 68710c59c51SImre Deak enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 68810c59c51SImre Deak SPRITE0_FLIP_DONE_INT_EN_VLV | 68910c59c51SImre Deak SPRITE1_FLIP_DONE_INT_EN_VLV); 69010c59c51SImre Deak if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 69110c59c51SImre Deak enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 69210c59c51SImre Deak if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 69310c59c51SImre Deak enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 69410c59c51SImre Deak 6956b12ca56SVille Syrjälä out: 6966b12ca56SVille Syrjälä WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 6976b12ca56SVille Syrjälä status_mask & ~PIPESTAT_INT_STATUS_MASK, 6986b12ca56SVille Syrjälä "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 6996b12ca56SVille Syrjälä pipe_name(pipe), enable_mask, status_mask); 7006b12ca56SVille Syrjälä 70110c59c51SImre Deak return enable_mask; 70210c59c51SImre Deak } 70310c59c51SImre Deak 7046b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv, 7056b12ca56SVille Syrjälä enum pipe pipe, u32 status_mask) 706755e9019SImre Deak { 7076b12ca56SVille Syrjälä i915_reg_t reg = PIPESTAT(pipe); 708755e9019SImre Deak u32 enable_mask; 709755e9019SImre Deak 7106b12ca56SVille Syrjälä WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 7116b12ca56SVille Syrjälä "pipe %c: status_mask=0x%x\n", 7126b12ca56SVille Syrjälä pipe_name(pipe), status_mask); 7136b12ca56SVille Syrjälä 7146b12ca56SVille Syrjälä lockdep_assert_held(&dev_priv->irq_lock); 7156b12ca56SVille Syrjälä WARN_ON(!intel_irqs_enabled(dev_priv)); 7166b12ca56SVille Syrjälä 7176b12ca56SVille Syrjälä if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 7186b12ca56SVille Syrjälä return; 7196b12ca56SVille Syrjälä 7206b12ca56SVille Syrjälä dev_priv->pipestat_irq_mask[pipe] |= status_mask; 7216b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 7226b12ca56SVille Syrjälä 7236b12ca56SVille Syrjälä I915_WRITE(reg, enable_mask | status_mask); 7246b12ca56SVille Syrjälä POSTING_READ(reg); 725755e9019SImre Deak } 726755e9019SImre Deak 7276b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv, 7286b12ca56SVille Syrjälä enum pipe pipe, u32 status_mask) 729755e9019SImre Deak { 7306b12ca56SVille Syrjälä i915_reg_t reg = PIPESTAT(pipe); 731755e9019SImre Deak u32 enable_mask; 732755e9019SImre Deak 7336b12ca56SVille Syrjälä WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 7346b12ca56SVille Syrjälä "pipe %c: status_mask=0x%x\n", 7356b12ca56SVille Syrjälä pipe_name(pipe), status_mask); 7366b12ca56SVille Syrjälä 7376b12ca56SVille Syrjälä lockdep_assert_held(&dev_priv->irq_lock); 7386b12ca56SVille Syrjälä WARN_ON(!intel_irqs_enabled(dev_priv)); 7396b12ca56SVille Syrjälä 7406b12ca56SVille Syrjälä if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 7416b12ca56SVille Syrjälä return; 7426b12ca56SVille Syrjälä 7436b12ca56SVille Syrjälä dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 7446b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 7456b12ca56SVille Syrjälä 7466b12ca56SVille Syrjälä I915_WRITE(reg, enable_mask | status_mask); 7476b12ca56SVille Syrjälä POSTING_READ(reg); 748755e9019SImre Deak } 749755e9019SImre Deak 750c0e09200SDave Airlie /** 751f49e38ddSJani Nikula * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 75214bb2c11STvrtko Ursulin 
* @dev_priv: i915 device private 75301c66889SZhao Yakui */ 75491d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 75501c66889SZhao Yakui { 75691d14251STvrtko Ursulin if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 757f49e38ddSJani Nikula return; 758f49e38ddSJani Nikula 75913321786SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 76001c66889SZhao Yakui 761755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 76291d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 4) 7633b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, 764755e9019SImre Deak PIPE_LEGACY_BLC_EVENT_STATUS); 7651ec14ad3SChris Wilson 76613321786SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 76701c66889SZhao Yakui } 76801c66889SZhao Yakui 769f75f3746SVille Syrjälä /* 770f75f3746SVille Syrjälä * This timing diagram depicts the video signal in and 771f75f3746SVille Syrjälä * around the vertical blanking period. 772f75f3746SVille Syrjälä * 773f75f3746SVille Syrjälä * Assumptions about the fictitious mode used in this example: 774f75f3746SVille Syrjälä * vblank_start >= 3 775f75f3746SVille Syrjälä * vsync_start = vblank_start + 1 776f75f3746SVille Syrjälä * vsync_end = vblank_start + 2 777f75f3746SVille Syrjälä * vtotal = vblank_start + 3 778f75f3746SVille Syrjälä * 779f75f3746SVille Syrjälä * start of vblank: 780f75f3746SVille Syrjälä * latch double buffered registers 781f75f3746SVille Syrjälä * increment frame counter (ctg+) 782f75f3746SVille Syrjälä * generate start of vblank interrupt (gen4+) 783f75f3746SVille Syrjälä * | 784f75f3746SVille Syrjälä * | frame start: 785f75f3746SVille Syrjälä * | generate frame start interrupt (aka. vblank interrupt) (gmch) 786f75f3746SVille Syrjälä * | may be shifted forward 1-3 extra lines via PIPECONF 787f75f3746SVille Syrjälä * | | 788f75f3746SVille Syrjälä * | | start of vsync: 789f75f3746SVille Syrjälä * | | generate vsync interrupt 790f75f3746SVille Syrjälä * | | | 791f75f3746SVille Syrjälä * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 792f75f3746SVille Syrjälä * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 793f75f3746SVille Syrjälä * ----va---> <-----------------vb--------------------> <--------va------------- 794f75f3746SVille Syrjälä * | | <----vs-----> | 795f75f3746SVille Syrjälä * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 796f75f3746SVille Syrjälä * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 797f75f3746SVille Syrjälä * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 798f75f3746SVille Syrjälä * | | | 799f75f3746SVille Syrjälä * last visible pixel first visible pixel 800f75f3746SVille Syrjälä * | increment frame counter (gen3/4) 801f75f3746SVille Syrjälä * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 802f75f3746SVille Syrjälä * 803f75f3746SVille Syrjälä * x = horizontal active 804f75f3746SVille Syrjälä * _ = horizontal blanking 805f75f3746SVille Syrjälä * hs = horizontal sync 806f75f3746SVille Syrjälä * va = vertical active 807f75f3746SVille Syrjälä * vb = vertical blanking 808f75f3746SVille Syrjälä * vs = vertical sync 809f75f3746SVille Syrjälä * vbs = vblank_start (number) 810f75f3746SVille Syrjälä * 811f75f3746SVille Syrjälä * Summary: 812f75f3746SVille Syrjälä * - most events happen at the start of horizontal sync 813f75f3746SVille Syrjälä * - frame start happens at the start of horizontal blank, 1-4 lines 814f75f3746SVille Syrjälä * (depending on PIPECONF settings) after the start of vblank 815f75f3746SVille Syrjälä * - gen3/4 pixel and frame counter are synchronized with the start 816f75f3746SVille Syrjälä * of horizontal active on the first line of vertical active 817f75f3746SVille Syrjälä */ 818f75f3746SVille Syrjälä 81942f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which 82042f52ef8SKeith Packard * we use as a pipe index 82142f52ef8SKeith Packard */ 82288e72717SThierry Reding static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 8230a3e67a4SJesse Barnes { 824fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 825f0f59a00SVille Syrjälä i915_reg_t high_frame, low_frame; 8260b2a8e09SVille Syrjälä u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 8275caa0feaSDaniel Vetter const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 828694e409dSVille Syrjälä unsigned long irqflags; 829391f75e2SVille Syrjälä 8300b2a8e09SVille Syrjälä htotal = mode->crtc_htotal; 8310b2a8e09SVille Syrjälä hsync_start = mode->crtc_hsync_start; 8320b2a8e09SVille Syrjälä vbl_start = mode->crtc_vblank_start; 8330b2a8e09SVille Syrjälä if (mode->flags & DRM_MODE_FLAG_INTERLACE) 8340b2a8e09SVille Syrjälä vbl_start = DIV_ROUND_UP(vbl_start, 2); 835391f75e2SVille Syrjälä 8360b2a8e09SVille Syrjälä /* Convert to pixel count */ 8370b2a8e09SVille Syrjälä vbl_start *= htotal; 8380b2a8e09SVille Syrjälä 8390b2a8e09SVille Syrjälä /* Start of vblank event occurs at start of hsync */ 8400b2a8e09SVille Syrjälä vbl_start -= htotal - hsync_start; 8410b2a8e09SVille Syrjälä 8429db4a9c7SJesse Barnes high_frame = PIPEFRAME(pipe); 8439db4a9c7SJesse Barnes low_frame = PIPEFRAMEPIXEL(pipe); 8445eddb70bSChris Wilson 845694e409dSVille Syrjälä spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 846694e409dSVille Syrjälä 8470a3e67a4SJesse Barnes /* 8480a3e67a4SJesse Barnes * High & low register fields aren't synchronized, so make sure 8490a3e67a4SJesse Barnes * we get a low value that's stable across two reads of the high 8500a3e67a4SJesse Barnes * 
register. 8510a3e67a4SJesse Barnes */ 8520a3e67a4SJesse Barnes do { 853694e409dSVille Syrjälä high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 854694e409dSVille Syrjälä low = I915_READ_FW(low_frame); 855694e409dSVille Syrjälä high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 8560a3e67a4SJesse Barnes } while (high1 != high2); 8570a3e67a4SJesse Barnes 858694e409dSVille Syrjälä spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 859694e409dSVille Syrjälä 8605eddb70bSChris Wilson high1 >>= PIPE_FRAME_HIGH_SHIFT; 861391f75e2SVille Syrjälä pixel = low & PIPE_PIXEL_MASK; 8625eddb70bSChris Wilson low >>= PIPE_FRAME_LOW_SHIFT; 863391f75e2SVille Syrjälä 864391f75e2SVille Syrjälä /* 865391f75e2SVille Syrjälä * The frame counter increments at beginning of active. 866391f75e2SVille Syrjälä * Cook up a vblank counter by also checking the pixel 867391f75e2SVille Syrjälä * counter against vblank start. 868391f75e2SVille Syrjälä */ 869edc08d0aSVille Syrjälä return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 8700a3e67a4SJesse Barnes } 8710a3e67a4SJesse Barnes 872974e59baSDave Airlie static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 8739880b7a5SJesse Barnes { 874fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 8759880b7a5SJesse Barnes 876649636efSVille Syrjälä return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 8779880b7a5SJesse Barnes } 8789880b7a5SJesse Barnes 879aec0246fSUma Shankar /* 880aec0246fSUma Shankar * On certain encoders on certain platforms, pipe 881aec0246fSUma Shankar * scanline register will not work to get the scanline, 882aec0246fSUma Shankar * since the timings are driven from the PORT or issues 883aec0246fSUma Shankar * with scanline register updates. 884aec0246fSUma Shankar * This function will use Framestamp and current 885aec0246fSUma Shankar * timestamp registers to calculate the scanline. 886aec0246fSUma Shankar */ 887aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 888aec0246fSUma Shankar { 889aec0246fSUma Shankar struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 890aec0246fSUma Shankar struct drm_vblank_crtc *vblank = 891aec0246fSUma Shankar &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 892aec0246fSUma Shankar const struct drm_display_mode *mode = &vblank->hwmode; 893aec0246fSUma Shankar u32 vblank_start = mode->crtc_vblank_start; 894aec0246fSUma Shankar u32 vtotal = mode->crtc_vtotal; 895aec0246fSUma Shankar u32 htotal = mode->crtc_htotal; 896aec0246fSUma Shankar u32 clock = mode->crtc_clock; 897aec0246fSUma Shankar u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 898aec0246fSUma Shankar 899aec0246fSUma Shankar /* 900aec0246fSUma Shankar * To avoid the race condition where we might cross into the 901aec0246fSUma Shankar * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 902aec0246fSUma Shankar * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 903aec0246fSUma Shankar * during the same frame. 904aec0246fSUma Shankar */ 905aec0246fSUma Shankar do { 906aec0246fSUma Shankar /* 907aec0246fSUma Shankar * This field provides read back of the display 908aec0246fSUma Shankar * pipe frame time stamp. The time stamp value 909aec0246fSUma Shankar * is sampled at every start of vertical blank. 
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
96941b578fbSJesse Barnes */ 97091d14251STvrtko Ursulin if (HAS_DDI(dev_priv) && !position) { 97141b578fbSJesse Barnes int i, temp; 97241b578fbSJesse Barnes 97341b578fbSJesse Barnes for (i = 0; i < 100; i++) { 97441b578fbSJesse Barnes udelay(1); 975707bdd3fSVille Syrjälä temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 97641b578fbSJesse Barnes if (temp != position) { 97741b578fbSJesse Barnes position = temp; 97841b578fbSJesse Barnes break; 97941b578fbSJesse Barnes } 98041b578fbSJesse Barnes } 98141b578fbSJesse Barnes } 98241b578fbSJesse Barnes 98341b578fbSJesse Barnes /* 98480715b2fSVille Syrjälä * See update_scanline_offset() for the details on the 98580715b2fSVille Syrjälä * scanline_offset adjustment. 986a225f079SVille Syrjälä */ 98780715b2fSVille Syrjälä return (position + crtc->scanline_offset) % vtotal; 988a225f079SVille Syrjälä } 989a225f079SVille Syrjälä 9901bf6ad62SDaniel Vetter static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 9911bf6ad62SDaniel Vetter bool in_vblank_irq, int *vpos, int *hpos, 9923bb403bfSVille Syrjälä ktime_t *stime, ktime_t *etime, 9933bb403bfSVille Syrjälä const struct drm_display_mode *mode) 9940af7e4dfSMario Kleiner { 995fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 99698187836SVille Syrjälä struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 99798187836SVille Syrjälä pipe); 9983aa18df8SVille Syrjälä int position; 99978e8fc6bSVille Syrjälä int vbl_start, vbl_end, hsync_start, htotal, vtotal; 1000ad3543edSMario Kleiner unsigned long irqflags; 10010af7e4dfSMario Kleiner 1002fc467a22SMaarten Lankhorst if (WARN_ON(!mode->crtc_clock)) { 10030af7e4dfSMario Kleiner DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 10049db4a9c7SJesse Barnes "pipe %c\n", pipe_name(pipe)); 10051bf6ad62SDaniel Vetter return false; 10060af7e4dfSMario Kleiner } 10070af7e4dfSMario Kleiner 1008c2baf4b7SVille Syrjälä htotal = mode->crtc_htotal; 100978e8fc6bSVille Syrjälä hsync_start = mode->crtc_hsync_start; 1010c2baf4b7SVille Syrjälä vtotal = mode->crtc_vtotal; 1011c2baf4b7SVille Syrjälä vbl_start = mode->crtc_vblank_start; 1012c2baf4b7SVille Syrjälä vbl_end = mode->crtc_vblank_end; 10130af7e4dfSMario Kleiner 1014d31faf65SVille Syrjälä if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 1015d31faf65SVille Syrjälä vbl_start = DIV_ROUND_UP(vbl_start, 2); 1016d31faf65SVille Syrjälä vbl_end /= 2; 1017d31faf65SVille Syrjälä vtotal /= 2; 1018d31faf65SVille Syrjälä } 1019d31faf65SVille Syrjälä 1020ad3543edSMario Kleiner /* 1021ad3543edSMario Kleiner * Lock uncore.lock, as we will do multiple timing critical raw 1022ad3543edSMario Kleiner * register reads, potentially with preemption disabled, so the 1023ad3543edSMario Kleiner * following code must not block on uncore.lock. 1024ad3543edSMario Kleiner */ 1025ad3543edSMario Kleiner spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1026ad3543edSMario Kleiner 1027ad3543edSMario Kleiner /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1028ad3543edSMario Kleiner 1029ad3543edSMario Kleiner /* Get optional system timestamp before query. */ 1030ad3543edSMario Kleiner if (stime) 1031ad3543edSMario Kleiner *stime = ktime_get(); 1032ad3543edSMario Kleiner 103391d14251STvrtko Ursulin if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 10340af7e4dfSMario Kleiner /* No obvious pixelcount register. Only query vertical 10350af7e4dfSMario Kleiner * scanout position from Display scan line register. 
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field.
		 * This matches how the scanline counter based position
		 * works since the scanline counter doesn't count the two
		 * half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
10873aa18df8SVille Syrjälä */ 10883aa18df8SVille Syrjälä if (position >= vbl_start) 10893aa18df8SVille Syrjälä position -= vbl_end; 10903aa18df8SVille Syrjälä else 10913aa18df8SVille Syrjälä position += vtotal - vbl_end; 10923aa18df8SVille Syrjälä 109391d14251STvrtko Ursulin if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 10943aa18df8SVille Syrjälä *vpos = position; 10953aa18df8SVille Syrjälä *hpos = 0; 10963aa18df8SVille Syrjälä } else { 10970af7e4dfSMario Kleiner *vpos = position / htotal; 10980af7e4dfSMario Kleiner *hpos = position - (*vpos * htotal); 10990af7e4dfSMario Kleiner } 11000af7e4dfSMario Kleiner 11011bf6ad62SDaniel Vetter return true; 11020af7e4dfSMario Kleiner } 11030af7e4dfSMario Kleiner 1104a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc) 1105a225f079SVille Syrjälä { 1106fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1107a225f079SVille Syrjälä unsigned long irqflags; 1108a225f079SVille Syrjälä int position; 1109a225f079SVille Syrjälä 1110a225f079SVille Syrjälä spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1111a225f079SVille Syrjälä position = __intel_get_crtc_scanline(crtc); 1112a225f079SVille Syrjälä spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1113a225f079SVille Syrjälä 1114a225f079SVille Syrjälä return position; 1115a225f079SVille Syrjälä } 1116a225f079SVille Syrjälä 111791d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1118f97108d1SJesse Barnes { 1119b5b72e89SMatthew Garrett u32 busy_up, busy_down, max_avg, min_avg; 11209270388eSDaniel Vetter u8 new_delay; 11219270388eSDaniel Vetter 1122d0ecd7e2SDaniel Vetter spin_lock(&mchdev_lock); 1123f97108d1SJesse Barnes 112473edd18fSDaniel Vetter I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 112573edd18fSDaniel Vetter 112620e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay; 11279270388eSDaniel Vetter 11287648fa99SJesse Barnes I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1129b5b72e89SMatthew Garrett busy_up = I915_READ(RCPREVBSYTUPAVG); 1130b5b72e89SMatthew Garrett busy_down = I915_READ(RCPREVBSYTDNAVG); 1131f97108d1SJesse Barnes max_avg = I915_READ(RCBMAXAVG); 1132f97108d1SJesse Barnes min_avg = I915_READ(RCBMINAVG); 1133f97108d1SJesse Barnes 1134f97108d1SJesse Barnes /* Handle RCS change request from hw */ 1135b5b72e89SMatthew Garrett if (busy_up > max_avg) { 113620e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 113720e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay - 1; 113820e4d407SDaniel Vetter if (new_delay < dev_priv->ips.max_delay) 113920e4d407SDaniel Vetter new_delay = dev_priv->ips.max_delay; 1140b5b72e89SMatthew Garrett } else if (busy_down < min_avg) { 114120e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 114220e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay + 1; 114320e4d407SDaniel Vetter if (new_delay > dev_priv->ips.min_delay) 114420e4d407SDaniel Vetter new_delay = dev_priv->ips.min_delay; 1145f97108d1SJesse Barnes } 1146f97108d1SJesse Barnes 114791d14251STvrtko Ursulin if (ironlake_set_drps(dev_priv, new_delay)) 114820e4d407SDaniel Vetter dev_priv->ips.cur_delay = new_delay; 1149f97108d1SJesse Barnes 1150d0ecd7e2SDaniel Vetter spin_unlock(&mchdev_lock); 11519270388eSDaniel Vetter 1152f97108d1SJesse Barnes return; 1153f97108d1SJesse Barnes } 1154f97108d1SJesse Barnes 11550bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine) 1156549f7365SChris Wilson { 
11573f88325cSChris Wilson const u32 seqno = intel_engine_get_seqno(engine); 1158e61e0f51SChris Wilson struct i915_request *rq = NULL; 11593f88325cSChris Wilson struct task_struct *tsk = NULL; 116056299fb7SChris Wilson struct intel_wait *wait; 1161dffabc8fSTvrtko Ursulin 11623f88325cSChris Wilson if (unlikely(!engine->breadcrumbs.irq_armed)) 1163bcbd5c33SChris Wilson return; 1164bcbd5c33SChris Wilson 11653f88325cSChris Wilson rcu_read_lock(); 116656299fb7SChris Wilson 116761d3dc70SChris Wilson spin_lock(&engine->breadcrumbs.irq_lock); 116861d3dc70SChris Wilson wait = engine->breadcrumbs.irq_wait; 116956299fb7SChris Wilson if (wait) { 11703f88325cSChris Wilson /* 11713f88325cSChris Wilson * We use a callback from the dma-fence to submit 117256299fb7SChris Wilson * requests after waiting on our own requests. To 117356299fb7SChris Wilson * ensure minimum delay in queuing the next request to 117456299fb7SChris Wilson * hardware, signal the fence now rather than wait for 117556299fb7SChris Wilson * the signaler to be woken up. We still wake up the 117656299fb7SChris Wilson * waiter in order to handle the irq-seqno coherency 117756299fb7SChris Wilson * issues (we may receive the interrupt before the 117856299fb7SChris Wilson * seqno is written, see __i915_request_irq_complete()) 117956299fb7SChris Wilson * and to handle coalescing of multiple seqno updates 118056299fb7SChris Wilson * and many waiters. 118156299fb7SChris Wilson */ 11823f88325cSChris Wilson if (i915_seqno_passed(seqno, wait->seqno)) { 1183e61e0f51SChris Wilson struct i915_request *waiter = wait->request; 1184de4d2106SChris Wilson 1185e3be4079SChris Wilson if (waiter && 1186e3be4079SChris Wilson !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1187de4d2106SChris Wilson &waiter->fence.flags) && 1188de4d2106SChris Wilson intel_wait_check_request(wait, waiter)) 1189e61e0f51SChris Wilson rq = i915_request_get(waiter); 119056299fb7SChris Wilson 11913f88325cSChris Wilson tsk = wait->tsk; 11923f88325cSChris Wilson } else { 119369dc4d00SChris Wilson if (engine->irq_seqno_barrier && 119469dc4d00SChris Wilson i915_seqno_passed(seqno, wait->seqno - 1)) { 11953f88325cSChris Wilson set_bit(ENGINE_IRQ_BREADCRUMB, 11963f88325cSChris Wilson &engine->irq_posted); 11973f88325cSChris Wilson tsk = wait->tsk; 11983f88325cSChris Wilson } 11993f88325cSChris Wilson } 120078796877SChris Wilson 120178796877SChris Wilson engine->breadcrumbs.irq_count++; 120267b807a8SChris Wilson } else { 1203bcbd5c33SChris Wilson if (engine->breadcrumbs.irq_armed) 120467b807a8SChris Wilson __intel_engine_disarm_breadcrumbs(engine); 120556299fb7SChris Wilson } 120661d3dc70SChris Wilson spin_unlock(&engine->breadcrumbs.irq_lock); 120756299fb7SChris Wilson 120824754d75SChris Wilson if (rq) { 1209e3be4079SChris Wilson spin_lock(&rq->lock); 1210e3be4079SChris Wilson dma_fence_signal_locked(&rq->fence); 12114e9a8befSChris Wilson GEM_BUG_ON(!i915_request_completed(rq)); 1212e3be4079SChris Wilson spin_unlock(&rq->lock); 1213e3be4079SChris Wilson 1214e61e0f51SChris Wilson i915_request_put(rq); 121524754d75SChris Wilson } 121656299fb7SChris Wilson 12173f88325cSChris Wilson if (tsk && tsk->state & TASK_NORMAL) 12183f88325cSChris Wilson wake_up_process(tsk); 12193f88325cSChris Wilson 12203f88325cSChris Wilson rcu_read_unlock(); 12213f88325cSChris Wilson 122256299fb7SChris Wilson trace_intel_engine_notify(engine, wait); 1223549f7365SChris Wilson } 1224549f7365SChris Wilson 122543cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv, 122643cf3bf0SChris Wilson struct 
intel_rps_ei *ei) 122731685c25SDeepak S { 1228679cb6c1SMika Kuoppala ei->ktime = ktime_get_raw(); 122943cf3bf0SChris Wilson ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 123043cf3bf0SChris Wilson ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 123131685c25SDeepak S } 123231685c25SDeepak S 123343cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 123443cf3bf0SChris Wilson { 1235562d9baeSSagar Arun Kamble memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 123643cf3bf0SChris Wilson } 123743cf3bf0SChris Wilson 123843cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 123943cf3bf0SChris Wilson { 1240562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1241562d9baeSSagar Arun Kamble const struct intel_rps_ei *prev = &rps->ei; 124243cf3bf0SChris Wilson struct intel_rps_ei now; 124343cf3bf0SChris Wilson u32 events = 0; 124443cf3bf0SChris Wilson 1245e0e8c7cbSChris Wilson if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 124643cf3bf0SChris Wilson return 0; 124743cf3bf0SChris Wilson 124843cf3bf0SChris Wilson vlv_c0_read(dev_priv, &now); 124931685c25SDeepak S 1250679cb6c1SMika Kuoppala if (prev->ktime) { 1251e0e8c7cbSChris Wilson u64 time, c0; 1252569884e3SChris Wilson u32 render, media; 1253e0e8c7cbSChris Wilson 1254679cb6c1SMika Kuoppala time = ktime_us_delta(now.ktime, prev->ktime); 12558f68d591SChris Wilson 1256e0e8c7cbSChris Wilson time *= dev_priv->czclk_freq; 1257e0e8c7cbSChris Wilson 1258e0e8c7cbSChris Wilson /* Workload can be split between render + media, 1259e0e8c7cbSChris Wilson * e.g. SwapBuffers being blitted in X after being rendered in 1260e0e8c7cbSChris Wilson * mesa. To account for this we need to combine both engines 1261e0e8c7cbSChris Wilson * into our activity counter. 
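/*
 * Illustrative sketch (not part of i915_irq.c): the decision made below,
 * with the register and unit conversions stripped out. Take the busier of
 * the two C0 residency deltas, express it as a percentage of the elapsed
 * time, and compare against the up/down thresholds. Names, types and the
 * percentage form are simplifying assumptions, not the hardware's units.
 */
static inline int example_c0_rps_decision(u64 render_busy, u64 media_busy,
					  u64 elapsed, u32 up_pct, u32 down_pct)
{
	u64 busy = render_busy > media_busy ? render_busy : media_busy;
	u64 busy_pct = elapsed ? div64_u64(busy * 100, elapsed) : 0;

	if (busy_pct > up_pct)
		return 1;	/* request a higher frequency */
	if (busy_pct < down_pct)
		return -1;	/* request a lower frequency */
	return 0;		/* leave the frequency alone */
}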
1262e0e8c7cbSChris Wilson */ 1263569884e3SChris Wilson render = now.render_c0 - prev->render_c0; 1264569884e3SChris Wilson media = now.media_c0 - prev->media_c0; 1265569884e3SChris Wilson c0 = max(render, media); 12666b7f6aa7SMika Kuoppala c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1267e0e8c7cbSChris Wilson 126860548c55SChris Wilson if (c0 > time * rps->power.up_threshold) 1269e0e8c7cbSChris Wilson events = GEN6_PM_RP_UP_THRESHOLD; 127060548c55SChris Wilson else if (c0 < time * rps->power.down_threshold) 1271e0e8c7cbSChris Wilson events = GEN6_PM_RP_DOWN_THRESHOLD; 127231685c25SDeepak S } 127331685c25SDeepak S 1274562d9baeSSagar Arun Kamble rps->ei = now; 127543cf3bf0SChris Wilson return events; 127631685c25SDeepak S } 127731685c25SDeepak S 12784912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work) 12793b8d8d91SJesse Barnes { 12802d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1281562d9baeSSagar Arun Kamble container_of(work, struct drm_i915_private, gt_pm.rps.work); 1282562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 12837c0a16adSChris Wilson bool client_boost = false; 12848d3afd7dSChris Wilson int new_delay, adj, min, max; 12857c0a16adSChris Wilson u32 pm_iir = 0; 12863b8d8d91SJesse Barnes 128759cdb63dSDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 1288562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1289562d9baeSSagar Arun Kamble pm_iir = fetch_and_zero(&rps->pm_iir); 1290562d9baeSSagar Arun Kamble client_boost = atomic_read(&rps->num_waiters); 1291d4d70aa5SImre Deak } 129259cdb63dSDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 12934912d041SBen Widawsky 129460611c13SPaulo Zanoni /* Make sure we didn't queue anything we're not going to process. */ 1295a6706b45SDeepak S WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 12968d3afd7dSChris Wilson if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 12977c0a16adSChris Wilson goto out; 12983b8d8d91SJesse Barnes 12999f817501SSagar Arun Kamble mutex_lock(&dev_priv->pcu_lock); 13007b9e0ae6SChris Wilson 130143cf3bf0SChris Wilson pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 130243cf3bf0SChris Wilson 1303562d9baeSSagar Arun Kamble adj = rps->last_adj; 1304562d9baeSSagar Arun Kamble new_delay = rps->cur_freq; 1305562d9baeSSagar Arun Kamble min = rps->min_freq_softlimit; 1306562d9baeSSagar Arun Kamble max = rps->max_freq_softlimit; 13077b92c1bdSChris Wilson if (client_boost) 1308562d9baeSSagar Arun Kamble max = rps->max_freq; 1309562d9baeSSagar Arun Kamble if (client_boost && new_delay < rps->boost_freq) { 1310562d9baeSSagar Arun Kamble new_delay = rps->boost_freq; 13118d3afd7dSChris Wilson adj = 0; 13128d3afd7dSChris Wilson } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1313dd75fdc8SChris Wilson if (adj > 0) 1314dd75fdc8SChris Wilson adj *= 2; 1315edcf284bSChris Wilson else /* CHV needs even encode values */ 1316edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? 
2 : 1; 13177e79a683SSagar Arun Kamble 1318562d9baeSSagar Arun Kamble if (new_delay >= rps->max_freq_softlimit) 13197e79a683SSagar Arun Kamble adj = 0; 13207b92c1bdSChris Wilson } else if (client_boost) { 1321f5a4c67dSChris Wilson adj = 0; 1322dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1323562d9baeSSagar Arun Kamble if (rps->cur_freq > rps->efficient_freq) 1324562d9baeSSagar Arun Kamble new_delay = rps->efficient_freq; 1325562d9baeSSagar Arun Kamble else if (rps->cur_freq > rps->min_freq_softlimit) 1326562d9baeSSagar Arun Kamble new_delay = rps->min_freq_softlimit; 1327dd75fdc8SChris Wilson adj = 0; 1328dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1329dd75fdc8SChris Wilson if (adj < 0) 1330dd75fdc8SChris Wilson adj *= 2; 1331edcf284bSChris Wilson else /* CHV needs even encode values */ 1332edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 13337e79a683SSagar Arun Kamble 1334562d9baeSSagar Arun Kamble if (new_delay <= rps->min_freq_softlimit) 13357e79a683SSagar Arun Kamble adj = 0; 1336dd75fdc8SChris Wilson } else { /* unknown event */ 1337edcf284bSChris Wilson adj = 0; 1338dd75fdc8SChris Wilson } 13393b8d8d91SJesse Barnes 1340562d9baeSSagar Arun Kamble rps->last_adj = adj; 1341edcf284bSChris Wilson 134279249636SBen Widawsky /* sysfs frequency interfaces may have snuck in while servicing the 134379249636SBen Widawsky * interrupt 134479249636SBen Widawsky */ 1345edcf284bSChris Wilson new_delay += adj; 13468d3afd7dSChris Wilson new_delay = clamp_t(int, new_delay, min, max); 134727544369SDeepak S 13489fcee2f7SChris Wilson if (intel_set_rps(dev_priv, new_delay)) { 13499fcee2f7SChris Wilson DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1350562d9baeSSagar Arun Kamble rps->last_adj = 0; 13519fcee2f7SChris Wilson } 13523b8d8d91SJesse Barnes 13539f817501SSagar Arun Kamble mutex_unlock(&dev_priv->pcu_lock); 13547c0a16adSChris Wilson 13557c0a16adSChris Wilson out: 13567c0a16adSChris Wilson /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 13577c0a16adSChris Wilson spin_lock_irq(&dev_priv->irq_lock); 1358562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) 13597c0a16adSChris Wilson gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 13607c0a16adSChris Wilson spin_unlock_irq(&dev_priv->irq_lock); 13613b8d8d91SJesse Barnes } 13623b8d8d91SJesse Barnes 1363e3689190SBen Widawsky 1364e3689190SBen Widawsky /** 1365e3689190SBen Widawsky * ivybridge_parity_work - Workqueue called when a parity error interrupt 1366e3689190SBen Widawsky * occurred. 1367e3689190SBen Widawsky * @work: workqueue struct 1368e3689190SBen Widawsky * 1369e3689190SBen Widawsky * Doesn't actually do anything except notify userspace. As a consequence of 1370e3689190SBen Widawsky * this event, userspace should try to remap the bad rows since statistically 1371e3689190SBen Widawsky * it is likely the same row is more likely to go bad again. 
1372e3689190SBen Widawsky */ 1373e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work) 1374e3689190SBen Widawsky { 13752d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1376cefcff8fSJoonas Lahtinen container_of(work, typeof(*dev_priv), l3_parity.error_work); 1377e3689190SBen Widawsky u32 error_status, row, bank, subbank; 137835a85ac6SBen Widawsky char *parity_event[6]; 1379e3689190SBen Widawsky uint32_t misccpctl; 138035a85ac6SBen Widawsky uint8_t slice = 0; 1381e3689190SBen Widawsky 1382e3689190SBen Widawsky /* We must turn off DOP level clock gating to access the L3 registers. 1383e3689190SBen Widawsky * In order to prevent a get/put style interface, acquire struct mutex 1384e3689190SBen Widawsky * any time we access those registers. 1385e3689190SBen Widawsky */ 138691c8a326SChris Wilson mutex_lock(&dev_priv->drm.struct_mutex); 1387e3689190SBen Widawsky 138835a85ac6SBen Widawsky /* If we've screwed up tracking, just let the interrupt fire again */ 138935a85ac6SBen Widawsky if (WARN_ON(!dev_priv->l3_parity.which_slice)) 139035a85ac6SBen Widawsky goto out; 139135a85ac6SBen Widawsky 1392e3689190SBen Widawsky misccpctl = I915_READ(GEN7_MISCCPCTL); 1393e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1394e3689190SBen Widawsky POSTING_READ(GEN7_MISCCPCTL); 1395e3689190SBen Widawsky 139635a85ac6SBen Widawsky while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1397f0f59a00SVille Syrjälä i915_reg_t reg; 139835a85ac6SBen Widawsky 139935a85ac6SBen Widawsky slice--; 14002d1fe073SJoonas Lahtinen if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 140135a85ac6SBen Widawsky break; 140235a85ac6SBen Widawsky 140335a85ac6SBen Widawsky dev_priv->l3_parity.which_slice &= ~(1<<slice); 140435a85ac6SBen Widawsky 14056fa1c5f1SVille Syrjälä reg = GEN7_L3CDERRST1(slice); 140635a85ac6SBen Widawsky 140735a85ac6SBen Widawsky error_status = I915_READ(reg); 1408e3689190SBen Widawsky row = GEN7_PARITY_ERROR_ROW(error_status); 1409e3689190SBen Widawsky bank = GEN7_PARITY_ERROR_BANK(error_status); 1410e3689190SBen Widawsky subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1411e3689190SBen Widawsky 141235a85ac6SBen Widawsky I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 141335a85ac6SBen Widawsky POSTING_READ(reg); 1414e3689190SBen Widawsky 1415cce723edSBen Widawsky parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1416e3689190SBen Widawsky parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1417e3689190SBen Widawsky parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1418e3689190SBen Widawsky parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 141935a85ac6SBen Widawsky parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 142035a85ac6SBen Widawsky parity_event[5] = NULL; 1421e3689190SBen Widawsky 142291c8a326SChris Wilson kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1423e3689190SBen Widawsky KOBJ_CHANGE, parity_event); 1424e3689190SBen Widawsky 142535a85ac6SBen Widawsky DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 142635a85ac6SBen Widawsky slice, row, bank, subbank); 1427e3689190SBen Widawsky 142835a85ac6SBen Widawsky kfree(parity_event[4]); 1429e3689190SBen Widawsky kfree(parity_event[3]); 1430e3689190SBen Widawsky kfree(parity_event[2]); 1431e3689190SBen Widawsky kfree(parity_event[1]); 1432e3689190SBen Widawsky } 1433e3689190SBen Widawsky 143435a85ac6SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl); 143535a85ac6SBen Widawsky 143635a85ac6SBen 
Widawsky out: 143735a85ac6SBen Widawsky WARN_ON(dev_priv->l3_parity.which_slice); 14384cb21832SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 14392d1fe073SJoonas Lahtinen gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 14404cb21832SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 144135a85ac6SBen Widawsky 144291c8a326SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 144335a85ac6SBen Widawsky } 144435a85ac6SBen Widawsky 1445261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1446261e40b8SVille Syrjälä u32 iir) 1447e3689190SBen Widawsky { 1448261e40b8SVille Syrjälä if (!HAS_L3_DPF(dev_priv)) 1449e3689190SBen Widawsky return; 1450e3689190SBen Widawsky 1451d0ecd7e2SDaniel Vetter spin_lock(&dev_priv->irq_lock); 1452261e40b8SVille Syrjälä gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1453d0ecd7e2SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 1454e3689190SBen Widawsky 1455261e40b8SVille Syrjälä iir &= GT_PARITY_ERROR(dev_priv); 145635a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 145735a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 1; 145835a85ac6SBen Widawsky 145935a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 146035a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 0; 146135a85ac6SBen Widawsky 1462a4da4fa4SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1463e3689190SBen Widawsky } 1464e3689190SBen Widawsky 1465261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1466f1af8fc1SPaulo Zanoni u32 gt_iir) 1467f1af8fc1SPaulo Zanoni { 1468f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14693b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1470f1af8fc1SPaulo Zanoni if (gt_iir & ILK_BSD_USER_INTERRUPT) 14713b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1472f1af8fc1SPaulo Zanoni } 1473f1af8fc1SPaulo Zanoni 1474261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1475e7b4c6b1SDaniel Vetter u32 gt_iir) 1476e7b4c6b1SDaniel Vetter { 1477f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14783b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1479cc609d5dSBen Widawsky if (gt_iir & GT_BSD_USER_INTERRUPT) 14803b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1481cc609d5dSBen Widawsky if (gt_iir & GT_BLT_USER_INTERRUPT) 14823b3f1650SAkash Goel notify_ring(dev_priv->engine[BCS]); 1483e7b4c6b1SDaniel Vetter 1484cc609d5dSBen Widawsky if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1485cc609d5dSBen Widawsky GT_BSD_CS_ERROR_INTERRUPT | 1486aaecdf61SDaniel Vetter GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1487aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1488e3689190SBen Widawsky 1489261e40b8SVille Syrjälä if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1490261e40b8SVille Syrjälä ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1491e7b4c6b1SDaniel Vetter } 1492e7b4c6b1SDaniel Vetter 14935d3d69d5SChris Wilson static void 149451f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1495fbcc1a0cSNick Hoath { 149631de7350SChris Wilson bool tasklet = false; 1497f747026cSChris Wilson 1498fd8526e5SChris Wilson if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 14998ea397faSChris Wilson tasklet = true; 150031de7350SChris Wilson 150151f6b0f9SChris Wilson if (iir & GT_RENDER_USER_INTERRUPT) { 150231de7350SChris Wilson notify_ring(engine); 150393ffbe8eSMichal Wajdeczko tasklet |= 
USES_GUC_SUBMISSION(engine->i915); 150431de7350SChris Wilson } 150531de7350SChris Wilson 150631de7350SChris Wilson if (tasklet) 1507fd8526e5SChris Wilson tasklet_hi_schedule(&engine->execlists.tasklet); 1508fbcc1a0cSNick Hoath } 1509fbcc1a0cSNick Hoath 15102e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915, 151155ef72f2SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1512abd58f01SBen Widawsky { 15132e4a5b25SChris Wilson void __iomem * const regs = i915->regs; 15142e4a5b25SChris Wilson 1515f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1516f0fd96f5SChris Wilson GEN8_GT_BCS_IRQ | \ 1517f0fd96f5SChris Wilson GEN8_GT_VCS1_IRQ | \ 1518f0fd96f5SChris Wilson GEN8_GT_VCS2_IRQ | \ 1519f0fd96f5SChris Wilson GEN8_GT_VECS_IRQ | \ 1520f0fd96f5SChris Wilson GEN8_GT_PM_IRQ | \ 1521f0fd96f5SChris Wilson GEN8_GT_GUC_IRQ) 1522f0fd96f5SChris Wilson 1523abd58f01SBen Widawsky if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15242e4a5b25SChris Wilson gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 15252e4a5b25SChris Wilson if (likely(gt_iir[0])) 15262e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1527abd58f01SBen Widawsky } 1528abd58f01SBen Widawsky 152985f9b5f9SZhao Yakui if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15302e4a5b25SChris Wilson gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 15312e4a5b25SChris Wilson if (likely(gt_iir[1])) 15322e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 153374cdb337SChris Wilson } 153474cdb337SChris Wilson 153526705e20SSagar Arun Kamble if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15362e4a5b25SChris Wilson gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1537f4de7794SChris Wilson if (likely(gt_iir[2])) 1538f4de7794SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 15390961021aSBen Widawsky } 15402e4a5b25SChris Wilson 15412e4a5b25SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15422e4a5b25SChris Wilson gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 15432e4a5b25SChris Wilson if (likely(gt_iir[3])) 15442e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 154555ef72f2SChris Wilson } 1546abd58f01SBen Widawsky } 1547abd58f01SBen Widawsky 15482e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1549f0fd96f5SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1550e30e251aSVille Syrjälä { 1551f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15522e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[RCS], 155351f6b0f9SChris Wilson gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 15542e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[BCS], 155551f6b0f9SChris Wilson gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1556e30e251aSVille Syrjälä } 1557e30e251aSVille Syrjälä 1558f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15592e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS], 156051f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 15612e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS2], 156251f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1563e30e251aSVille Syrjälä } 1564e30e251aSVille Syrjälä 1565f0fd96f5SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15662e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VECS], 156751f6b0f9SChris Wilson gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1568f0fd96f5SChris Wilson } 1569e30e251aSVille Syrjälä 1570f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15712e4a5b25SChris Wilson 
gen6_rps_irq_handler(i915, gt_iir[2]); 15722e4a5b25SChris Wilson gen9_guc_irq_handler(i915, gt_iir[2]); 1573e30e251aSVille Syrjälä } 1574f0fd96f5SChris Wilson } 1575e30e251aSVille Syrjälä 1576af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1577121e758eSDhinakaran Pandiyan { 1578af92058fSVille Syrjälä switch (pin) { 1579af92058fSVille Syrjälä case HPD_PORT_C: 1580121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1581af92058fSVille Syrjälä case HPD_PORT_D: 1582121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1583af92058fSVille Syrjälä case HPD_PORT_E: 1584121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1585af92058fSVille Syrjälä case HPD_PORT_F: 1586121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1587121e758eSDhinakaran Pandiyan default: 1588121e758eSDhinakaran Pandiyan return false; 1589121e758eSDhinakaran Pandiyan } 1590121e758eSDhinakaran Pandiyan } 1591121e758eSDhinakaran Pandiyan 1592af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 159363c88d22SImre Deak { 1594af92058fSVille Syrjälä switch (pin) { 1595af92058fSVille Syrjälä case HPD_PORT_A: 1596195baa06SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 1597af92058fSVille Syrjälä case HPD_PORT_B: 159863c88d22SImre Deak return val & PORTB_HOTPLUG_LONG_DETECT; 1599af92058fSVille Syrjälä case HPD_PORT_C: 160063c88d22SImre Deak return val & PORTC_HOTPLUG_LONG_DETECT; 160163c88d22SImre Deak default: 160263c88d22SImre Deak return false; 160363c88d22SImre Deak } 160463c88d22SImre Deak } 160563c88d22SImre Deak 1606af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 160731604222SAnusha Srivatsa { 1608af92058fSVille Syrjälä switch (pin) { 1609af92058fSVille Syrjälä case HPD_PORT_A: 161031604222SAnusha Srivatsa return val & ICP_DDIA_HPD_LONG_DETECT; 1611af92058fSVille Syrjälä case HPD_PORT_B: 161231604222SAnusha Srivatsa return val & ICP_DDIB_HPD_LONG_DETECT; 161331604222SAnusha Srivatsa default: 161431604222SAnusha Srivatsa return false; 161531604222SAnusha Srivatsa } 161631604222SAnusha Srivatsa } 161731604222SAnusha Srivatsa 1618af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 161931604222SAnusha Srivatsa { 1620af92058fSVille Syrjälä switch (pin) { 1621af92058fSVille Syrjälä case HPD_PORT_C: 162231604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1623af92058fSVille Syrjälä case HPD_PORT_D: 162431604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1625af92058fSVille Syrjälä case HPD_PORT_E: 162631604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1627af92058fSVille Syrjälä case HPD_PORT_F: 162831604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 162931604222SAnusha Srivatsa default: 163031604222SAnusha Srivatsa return false; 163131604222SAnusha Srivatsa } 163231604222SAnusha Srivatsa } 163331604222SAnusha Srivatsa 1634af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 16356dbf30ceSVille Syrjälä { 1636af92058fSVille Syrjälä switch (pin) { 1637af92058fSVille Syrjälä case HPD_PORT_E: 16386dbf30ceSVille Syrjälä return val & PORTE_HOTPLUG_LONG_DETECT; 16396dbf30ceSVille Syrjälä default: 16406dbf30ceSVille Syrjälä return false; 16416dbf30ceSVille Syrjälä } 16426dbf30ceSVille Syrjälä } 16436dbf30ceSVille Syrjälä 
1644af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 164574c0b395SVille Syrjälä { 1646af92058fSVille Syrjälä switch (pin) { 1647af92058fSVille Syrjälä case HPD_PORT_A: 164874c0b395SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 1649af92058fSVille Syrjälä case HPD_PORT_B: 165074c0b395SVille Syrjälä return val & PORTB_HOTPLUG_LONG_DETECT; 1651af92058fSVille Syrjälä case HPD_PORT_C: 165274c0b395SVille Syrjälä return val & PORTC_HOTPLUG_LONG_DETECT; 1653af92058fSVille Syrjälä case HPD_PORT_D: 165474c0b395SVille Syrjälä return val & PORTD_HOTPLUG_LONG_DETECT; 165574c0b395SVille Syrjälä default: 165674c0b395SVille Syrjälä return false; 165774c0b395SVille Syrjälä } 165874c0b395SVille Syrjälä } 165974c0b395SVille Syrjälä 1660af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1661e4ce95aaSVille Syrjälä { 1662af92058fSVille Syrjälä switch (pin) { 1663af92058fSVille Syrjälä case HPD_PORT_A: 1664e4ce95aaSVille Syrjälä return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1665e4ce95aaSVille Syrjälä default: 1666e4ce95aaSVille Syrjälä return false; 1667e4ce95aaSVille Syrjälä } 1668e4ce95aaSVille Syrjälä } 1669e4ce95aaSVille Syrjälä 1670af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 167113cf5504SDave Airlie { 1672af92058fSVille Syrjälä switch (pin) { 1673af92058fSVille Syrjälä case HPD_PORT_B: 1674676574dfSJani Nikula return val & PORTB_HOTPLUG_LONG_DETECT; 1675af92058fSVille Syrjälä case HPD_PORT_C: 1676676574dfSJani Nikula return val & PORTC_HOTPLUG_LONG_DETECT; 1677af92058fSVille Syrjälä case HPD_PORT_D: 1678676574dfSJani Nikula return val & PORTD_HOTPLUG_LONG_DETECT; 1679676574dfSJani Nikula default: 1680676574dfSJani Nikula return false; 168113cf5504SDave Airlie } 168213cf5504SDave Airlie } 168313cf5504SDave Airlie 1684af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 168513cf5504SDave Airlie { 1686af92058fSVille Syrjälä switch (pin) { 1687af92058fSVille Syrjälä case HPD_PORT_B: 1688676574dfSJani Nikula return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1689af92058fSVille Syrjälä case HPD_PORT_C: 1690676574dfSJani Nikula return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1691af92058fSVille Syrjälä case HPD_PORT_D: 1692676574dfSJani Nikula return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1693676574dfSJani Nikula default: 1694676574dfSJani Nikula return false; 169513cf5504SDave Airlie } 169613cf5504SDave Airlie } 169713cf5504SDave Airlie 169842db67d6SVille Syrjälä /* 169942db67d6SVille Syrjälä * Get a bit mask of pins that have triggered, and which ones may be long. 170042db67d6SVille Syrjälä * This can be called multiple times with the same masks to accumulate 170142db67d6SVille Syrjälä * hotplug detection results from several registers. 170242db67d6SVille Syrjälä * 170342db67d6SVille Syrjälä * Note that the caller is expected to zero out the masks initially. 
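/*
 * Illustrative sketch (not part of i915_irq.c): a hypothetical caller
 * showing the accumulation contract described above. Zero the masks once,
 * call once per trigger/hotplug-control register pair, then hand the
 * combined result to intel_hpd_irq_handler(). The SPT table and detect
 * callbacks are only examples of valid arguments; the helper itself and
 * its parameters are placeholders.
 */
static void example_accumulate_hpd(struct drm_i915_private *dev_priv,
				   u32 trigger1, u32 dig_reg1,
				   u32 trigger2, u32 dig_reg2)
{
	u32 pin_mask = 0, long_mask = 0;	/* caller zeroes the masks */

	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   trigger1, dig_reg1, hpd_spt,
			   spt_port_hotplug_long_detect);
	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
			   trigger2, dig_reg2, hpd_spt,
			   spt_port_hotplug2_long_detect);

	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
}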
170442db67d6SVille Syrjälä */ 1705cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1706cf53902fSRodrigo Vivi u32 *pin_mask, u32 *long_mask, 17078c841e57SJani Nikula u32 hotplug_trigger, u32 dig_hotplug_reg, 1708fd63e2a9SImre Deak const u32 hpd[HPD_NUM_PINS], 1709af92058fSVille Syrjälä bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1710676574dfSJani Nikula { 1711e9be2850SVille Syrjälä enum hpd_pin pin; 1712676574dfSJani Nikula 1713e9be2850SVille Syrjälä for_each_hpd_pin(pin) { 1714e9be2850SVille Syrjälä if ((hpd[pin] & hotplug_trigger) == 0) 17158c841e57SJani Nikula continue; 17168c841e57SJani Nikula 1717e9be2850SVille Syrjälä *pin_mask |= BIT(pin); 1718676574dfSJani Nikula 1719af92058fSVille Syrjälä if (long_pulse_detect(pin, dig_hotplug_reg)) 1720e9be2850SVille Syrjälä *long_mask |= BIT(pin); 1721676574dfSJani Nikula } 1722676574dfSJani Nikula 1723f88f0478SVille Syrjälä DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1724f88f0478SVille Syrjälä hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1725676574dfSJani Nikula 1726676574dfSJani Nikula } 1727676574dfSJani Nikula 172891d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1729515ac2bbSDaniel Vetter { 173028c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1731515ac2bbSDaniel Vetter } 1732515ac2bbSDaniel Vetter 173391d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1734ce99c256SDaniel Vetter { 17359ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1736ce99c256SDaniel Vetter } 1737ce99c256SDaniel Vetter 17388bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS) 173991d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 174091d14251STvrtko Ursulin enum pipe pipe, 1741eba94eb9SDaniel Vetter uint32_t crc0, uint32_t crc1, 1742eba94eb9SDaniel Vetter uint32_t crc2, uint32_t crc3, 17438bc5e955SDaniel Vetter uint32_t crc4) 17448bf1e9f1SShuang He { 17458bf1e9f1SShuang He struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 17468c6b709dSTomeu Vizoso struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17478c6b709dSTomeu Vizoso uint32_t crcs[5]; 1748b2c88f5bSDamien Lespiau 1749d538bbdfSDamien Lespiau spin_lock(&pipe_crc->lock); 17508c6b709dSTomeu Vizoso /* 17518c6b709dSTomeu Vizoso * For some not yet identified reason, the first CRC is 17528c6b709dSTomeu Vizoso * bonkers. So let's just wait for the next vblank and read 17538c6b709dSTomeu Vizoso * out the buggy result. 17548c6b709dSTomeu Vizoso * 1755163e8aecSRodrigo Vivi * On GEN8+ sometimes the second CRC is bonkers as well, so 17568c6b709dSTomeu Vizoso * don't trust that one either. 
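/*
 * Illustrative sketch (not part of i915_irq.c): the skip rule applied
 * below, in isolation. The first CRC after enabling is unreliable, and on
 * gen8+ the second one is too, so both are dropped before entries are
 * reported. Parameter names are placeholders for the driver state.
 */
static inline bool example_skip_crc_entry(int gen, int already_skipped)
{
	return already_skipped <= 0 || (gen >= 8 && already_skipped == 1);
}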
17578c6b709dSTomeu Vizoso */ 1758033b7a23SMaarten Lankhorst if (pipe_crc->skipped <= 0 || 1759163e8aecSRodrigo Vivi (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 17608c6b709dSTomeu Vizoso pipe_crc->skipped++; 17618c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17628c6b709dSTomeu Vizoso return; 17638c6b709dSTomeu Vizoso } 17648c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17656cc42152SMaarten Lankhorst 17668c6b709dSTomeu Vizoso crcs[0] = crc0; 17678c6b709dSTomeu Vizoso crcs[1] = crc1; 17688c6b709dSTomeu Vizoso crcs[2] = crc2; 17698c6b709dSTomeu Vizoso crcs[3] = crc3; 17708c6b709dSTomeu Vizoso crcs[4] = crc4; 1771246ee524STomeu Vizoso drm_crtc_add_crc_entry(&crtc->base, true, 1772ca814b25SDaniel Vetter drm_crtc_accurate_vblank_count(&crtc->base), 1773246ee524STomeu Vizoso crcs); 17748c6b709dSTomeu Vizoso } 1775277de95eSDaniel Vetter #else 1776277de95eSDaniel Vetter static inline void 177791d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 177891d14251STvrtko Ursulin enum pipe pipe, 1779277de95eSDaniel Vetter uint32_t crc0, uint32_t crc1, 1780277de95eSDaniel Vetter uint32_t crc2, uint32_t crc3, 1781277de95eSDaniel Vetter uint32_t crc4) {} 1782277de95eSDaniel Vetter #endif 1783eba94eb9SDaniel Vetter 1784277de95eSDaniel Vetter 178591d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 178691d14251STvrtko Ursulin enum pipe pipe) 17875a69b89fSDaniel Vetter { 178891d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 17895a69b89fSDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 17905a69b89fSDaniel Vetter 0, 0, 0, 0); 17915a69b89fSDaniel Vetter } 17925a69b89fSDaniel Vetter 179391d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 179491d14251STvrtko Ursulin enum pipe pipe) 1795eba94eb9SDaniel Vetter { 179691d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 1797eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1798eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1799eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1800eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 18018bc5e955SDaniel Vetter I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1802eba94eb9SDaniel Vetter } 18035b3a856bSDaniel Vetter 180491d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 180591d14251STvrtko Ursulin enum pipe pipe) 18065b3a856bSDaniel Vetter { 18070b5c5ed0SDaniel Vetter uint32_t res1, res2; 18080b5c5ed0SDaniel Vetter 180991d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 3) 18100b5c5ed0SDaniel Vetter res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 18110b5c5ed0SDaniel Vetter else 18120b5c5ed0SDaniel Vetter res1 = 0; 18130b5c5ed0SDaniel Vetter 181491d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 18150b5c5ed0SDaniel Vetter res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 18160b5c5ed0SDaniel Vetter else 18170b5c5ed0SDaniel Vetter res2 = 0; 18185b3a856bSDaniel Vetter 181991d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 18200b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_RED(pipe)), 18210b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_GREEN(pipe)), 18220b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_BLUE(pipe)), 18230b5c5ed0SDaniel Vetter res1, res2); 18245b3a856bSDaniel Vetter } 18258bf1e9f1SShuang He 18261403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 18271403c0d4SPaulo Zanoni * IMR bits until 
the work is done. Other interrupts can be processed without 18281403c0d4SPaulo Zanoni * the work queue. */ 18291403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1830baf02a1fSBen Widawsky { 1831562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1832562d9baeSSagar Arun Kamble 1833a6706b45SDeepak S if (pm_iir & dev_priv->pm_rps_events) { 183459cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 1835f4e9af4fSAkash Goel gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1836562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1837562d9baeSSagar Arun Kamble rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1838562d9baeSSagar Arun Kamble schedule_work(&rps->work); 183941a05a3aSDaniel Vetter } 1840d4d70aa5SImre Deak spin_unlock(&dev_priv->irq_lock); 1841d4d70aa5SImre Deak } 1842baf02a1fSBen Widawsky 1843bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8) 1844c9a9a268SImre Deak return; 1845c9a9a268SImre Deak 18462d1fe073SJoonas Lahtinen if (HAS_VEBOX(dev_priv)) { 184712638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 18483b3f1650SAkash Goel notify_ring(dev_priv->engine[VECS]); 184912638c57SBen Widawsky 1850aaecdf61SDaniel Vetter if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1851aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 185212638c57SBen Widawsky } 18531403c0d4SPaulo Zanoni } 1854baf02a1fSBen Widawsky 185526705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 185626705e20SSagar Arun Kamble { 185793bf8096SMichal Wajdeczko if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 185893bf8096SMichal Wajdeczko intel_guc_to_host_event_handler(&dev_priv->guc); 185926705e20SSagar Arun Kamble } 186026705e20SSagar Arun Kamble 186144d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 186244d9241eSVille Syrjälä { 186344d9241eSVille Syrjälä enum pipe pipe; 186444d9241eSVille Syrjälä 186544d9241eSVille Syrjälä for_each_pipe(dev_priv, pipe) { 186644d9241eSVille Syrjälä I915_WRITE(PIPESTAT(pipe), 186744d9241eSVille Syrjälä PIPESTAT_INT_STATUS_MASK | 186844d9241eSVille Syrjälä PIPE_FIFO_UNDERRUN_STATUS); 186944d9241eSVille Syrjälä 187044d9241eSVille Syrjälä dev_priv->pipestat_irq_mask[pipe] = 0; 187144d9241eSVille Syrjälä } 187244d9241eSVille Syrjälä } 187344d9241eSVille Syrjälä 1874eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 187591d14251STvrtko Ursulin u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 18767e231dbeSJesse Barnes { 18777e231dbeSJesse Barnes int pipe; 18787e231dbeSJesse Barnes 187958ead0d7SImre Deak spin_lock(&dev_priv->irq_lock); 18801ca993d2SVille Syrjälä 18811ca993d2SVille Syrjälä if (!dev_priv->display_irqs_enabled) { 18821ca993d2SVille Syrjälä spin_unlock(&dev_priv->irq_lock); 18831ca993d2SVille Syrjälä return; 18841ca993d2SVille Syrjälä } 18851ca993d2SVille Syrjälä 1886055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 1887f0f59a00SVille Syrjälä i915_reg_t reg; 18886b12ca56SVille Syrjälä u32 status_mask, enable_mask, iir_bit = 0; 188991d181ddSImre Deak 1890bbb5eebfSDaniel Vetter /* 1891bbb5eebfSDaniel Vetter * PIPESTAT bits get signalled even when the interrupt is 1892bbb5eebfSDaniel Vetter * disabled with the mask bits, and some of the status bits do 1893bbb5eebfSDaniel Vetter * not generate interrupts at all (like the underrun bit). 
Hence 1894bbb5eebfSDaniel Vetter * we need to be careful that we only handle what we want to 1895bbb5eebfSDaniel Vetter * handle. 1896bbb5eebfSDaniel Vetter */ 18970f239f4cSDaniel Vetter 18980f239f4cSDaniel Vetter /* fifo underruns are filtered in the underrun handler. */ 18996b12ca56SVille Syrjälä status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1900bbb5eebfSDaniel Vetter 1901bbb5eebfSDaniel Vetter switch (pipe) { 1902bbb5eebfSDaniel Vetter case PIPE_A: 1903bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1904bbb5eebfSDaniel Vetter break; 1905bbb5eebfSDaniel Vetter case PIPE_B: 1906bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1907bbb5eebfSDaniel Vetter break; 19083278f67fSVille Syrjälä case PIPE_C: 19093278f67fSVille Syrjälä iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 19103278f67fSVille Syrjälä break; 1911bbb5eebfSDaniel Vetter } 1912bbb5eebfSDaniel Vetter if (iir & iir_bit) 19136b12ca56SVille Syrjälä status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1914bbb5eebfSDaniel Vetter 19156b12ca56SVille Syrjälä if (!status_mask) 191691d181ddSImre Deak continue; 191791d181ddSImre Deak 191891d181ddSImre Deak reg = PIPESTAT(pipe); 19196b12ca56SVille Syrjälä pipe_stats[pipe] = I915_READ(reg) & status_mask; 19206b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 19217e231dbeSJesse Barnes 19227e231dbeSJesse Barnes /* 19237e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 1924132c27c9SVille Syrjälä * 1925132c27c9SVille Syrjälä * Toggle the enable bits to make sure we get an 1926132c27c9SVille Syrjälä * edge in the ISR pipe event bit if we don't clear 1927132c27c9SVille Syrjälä * all the enabled status bits. Otherwise the edge 1928132c27c9SVille Syrjälä * triggered IIR on i965/g4x wouldn't notice that 1929132c27c9SVille Syrjälä * an interrupt is still pending.
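/*
 * Illustrative sketch (not part of i915_irq.c): the two-step ack used just
 * below, in isolation. The first write clears the latched status bits, the
 * second rewrites the enable mask so that any status bit which latches
 * again produces a fresh 0->1 edge for the edge-triggered IIR on i965/g4x.
 * The write callback is a stand-in for I915_WRITE().
 */
static inline void example_ack_pipestat(void (*write_reg)(u32 val),
					u32 latched_status, u32 enable_mask)
{
	if (!latched_status)
		return;
	write_reg(latched_status);	/* clear what we latched */
	write_reg(enable_mask);		/* re-arm to force a new edge */
}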
19307e231dbeSJesse Barnes */ 1931132c27c9SVille Syrjälä if (pipe_stats[pipe]) { 1932132c27c9SVille Syrjälä I915_WRITE(reg, pipe_stats[pipe]); 1933132c27c9SVille Syrjälä I915_WRITE(reg, enable_mask); 1934132c27c9SVille Syrjälä } 19357e231dbeSJesse Barnes } 193658ead0d7SImre Deak spin_unlock(&dev_priv->irq_lock); 19372ecb8ca4SVille Syrjälä } 19382ecb8ca4SVille Syrjälä 1939eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1940eb64343cSVille Syrjälä u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1941eb64343cSVille Syrjälä { 1942eb64343cSVille Syrjälä enum pipe pipe; 1943eb64343cSVille Syrjälä 1944eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1945eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1946eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1947eb64343cSVille Syrjälä 1948eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1949eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1950eb64343cSVille Syrjälä 1951eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1952eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1953eb64343cSVille Syrjälä } 1954eb64343cSVille Syrjälä } 1955eb64343cSVille Syrjälä 1956eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1957eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1958eb64343cSVille Syrjälä { 1959eb64343cSVille Syrjälä bool blc_event = false; 1960eb64343cSVille Syrjälä enum pipe pipe; 1961eb64343cSVille Syrjälä 1962eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1963eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1964eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1965eb64343cSVille Syrjälä 1966eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1967eb64343cSVille Syrjälä blc_event = true; 1968eb64343cSVille Syrjälä 1969eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1970eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1971eb64343cSVille Syrjälä 1972eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1973eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1974eb64343cSVille Syrjälä } 1975eb64343cSVille Syrjälä 1976eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1977eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 1978eb64343cSVille Syrjälä } 1979eb64343cSVille Syrjälä 1980eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1981eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1982eb64343cSVille Syrjälä { 1983eb64343cSVille Syrjälä bool blc_event = false; 1984eb64343cSVille Syrjälä enum pipe pipe; 1985eb64343cSVille Syrjälä 1986eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1987eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1988eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1989eb64343cSVille Syrjälä 1990eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1991eb64343cSVille Syrjälä blc_event = true; 1992eb64343cSVille Syrjälä 1993eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1994eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1995eb64343cSVille Syrjälä 1996eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 
1997eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1998eb64343cSVille Syrjälä } 1999eb64343cSVille Syrjälä 2000eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2001eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 2002eb64343cSVille Syrjälä 2003eb64343cSVille Syrjälä if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2004eb64343cSVille Syrjälä gmbus_irq_handler(dev_priv); 2005eb64343cSVille Syrjälä } 2006eb64343cSVille Syrjälä 200791d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 20082ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES]) 20092ecb8ca4SVille Syrjälä { 20102ecb8ca4SVille Syrjälä enum pipe pipe; 20117e231dbeSJesse Barnes 2012055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2013fd3a4024SDaniel Vetter if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2014fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 20154356d586SDaniel Vetter 20164356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 201791d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 20182d9d2b0bSVille Syrjälä 20191f7247c0SDaniel Vetter if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 20201f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 202131acc7f5SJesse Barnes } 202231acc7f5SJesse Barnes 2023c1874ed7SImre Deak if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 202491d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2025c1874ed7SImre Deak } 2026c1874ed7SImre Deak 20271ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 202816c6c56bSVille Syrjälä { 20290ba7c51aSVille Syrjälä u32 hotplug_status = 0, hotplug_status_mask; 20300ba7c51aSVille Syrjälä int i; 203116c6c56bSVille Syrjälä 20320ba7c51aSVille Syrjälä if (IS_G4X(dev_priv) || 20330ba7c51aSVille Syrjälä IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 20340ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 20350ba7c51aSVille Syrjälä DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 20360ba7c51aSVille Syrjälä else 20370ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 20380ba7c51aSVille Syrjälä 20390ba7c51aSVille Syrjälä /* 20400ba7c51aSVille Syrjälä * We absolutely have to clear all the pending interrupt 20410ba7c51aSVille Syrjälä * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 20420ba7c51aSVille Syrjälä * interrupt bit won't have an edge, and the i965/g4x 20430ba7c51aSVille Syrjälä * edge triggered IIR will not notice that an interrupt 20440ba7c51aSVille Syrjälä * is still pending. 
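/*
 * Illustrative sketch (not part of i915_irq.c): the bounded "read and
 * clear until it stays clear" pattern used below, with hypothetical
 * callbacks standing in for the PORT_HOTPLUG_STAT accesses. A new event
 * may latch while we are clearing, hence the retry loop and the
 * accumulated return value.
 */
static inline u32 example_clear_until_idle(u32 (*read_status)(void),
					   void (*clear_status)(u32),
					   int max_tries)
{
	u32 seen = 0;
	int i;

	for (i = 0; i < max_tries; i++) {
		u32 pending = read_status();

		if (!pending)
			break;			/* nothing left latched */
		seen |= pending;
		clear_status(pending);		/* may raise a new event */
	}
	return seen;
}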
We can't use PORT_HOTPLUG_EN to 20450ba7c51aSVille Syrjälä * guarantee the edge as the act of toggling the enable 20460ba7c51aSVille Syrjälä * bits can itself generate a new hotplug interrupt :( 20470ba7c51aSVille Syrjälä */ 20480ba7c51aSVille Syrjälä for (i = 0; i < 10; i++) { 20490ba7c51aSVille Syrjälä u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 20500ba7c51aSVille Syrjälä 20510ba7c51aSVille Syrjälä if (tmp == 0) 20520ba7c51aSVille Syrjälä return hotplug_status; 20530ba7c51aSVille Syrjälä 20540ba7c51aSVille Syrjälä hotplug_status |= tmp; 20553ff60f89SOscar Mateo I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 20560ba7c51aSVille Syrjälä } 20570ba7c51aSVille Syrjälä 20580ba7c51aSVille Syrjälä WARN_ONCE(1, 20590ba7c51aSVille Syrjälä "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 20600ba7c51aSVille Syrjälä I915_READ(PORT_HOTPLUG_STAT)); 20611ae3c34cSVille Syrjälä 20621ae3c34cSVille Syrjälä return hotplug_status; 20631ae3c34cSVille Syrjälä } 20641ae3c34cSVille Syrjälä 206591d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 20661ae3c34cSVille Syrjälä u32 hotplug_status) 20671ae3c34cSVille Syrjälä { 20681ae3c34cSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 20693ff60f89SOscar Mateo 207091d14251STvrtko Ursulin if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 207191d14251STvrtko Ursulin IS_CHERRYVIEW(dev_priv)) { 207216c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 207316c6c56bSVille Syrjälä 207458f2cf24SVille Syrjälä if (hotplug_trigger) { 2075cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2076cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2077cf53902fSRodrigo Vivi hpd_status_g4x, 2078fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 207958f2cf24SVille Syrjälä 208091d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 208158f2cf24SVille Syrjälä } 2082369712e8SJani Nikula 2083369712e8SJani Nikula if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 208491d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 208516c6c56bSVille Syrjälä } else { 208616c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 208716c6c56bSVille Syrjälä 208858f2cf24SVille Syrjälä if (hotplug_trigger) { 2089cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2090cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2091cf53902fSRodrigo Vivi hpd_status_i915, 2092fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 209391d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 209416c6c56bSVille Syrjälä } 20953ff60f89SOscar Mateo } 209658f2cf24SVille Syrjälä } 209716c6c56bSVille Syrjälä 2098c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2099c1874ed7SImre Deak { 210045a83f84SDaniel Vetter struct drm_device *dev = arg; 2101fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2102c1874ed7SImre Deak irqreturn_t ret = IRQ_NONE; 2103c1874ed7SImre Deak 21042dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21052dd2a883SImre Deak return IRQ_NONE; 21062dd2a883SImre Deak 21071f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21081f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21091f814dacSImre Deak 21101e1cace9SVille Syrjälä do { 21116e814800SVille Syrjälä u32 iir, gt_iir, pm_iir; 21122ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21131ae3c34cSVille Syrjälä u32 hotplug_status = 0; 
2114a5e485a9SVille Syrjälä u32 ier = 0; 21153ff60f89SOscar Mateo 2116c1874ed7SImre Deak gt_iir = I915_READ(GTIIR); 2117c1874ed7SImre Deak pm_iir = I915_READ(GEN6_PMIIR); 21183ff60f89SOscar Mateo iir = I915_READ(VLV_IIR); 2119c1874ed7SImre Deak 2120c1874ed7SImre Deak if (gt_iir == 0 && pm_iir == 0 && iir == 0) 21211e1cace9SVille Syrjälä break; 2122c1874ed7SImre Deak 2123c1874ed7SImre Deak ret = IRQ_HANDLED; 2124c1874ed7SImre Deak 2125a5e485a9SVille Syrjälä /* 2126a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2127a5e485a9SVille Syrjälä * 2128a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2129a5e485a9SVille Syrjälä * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2130a5e485a9SVille Syrjälä * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2131a5e485a9SVille Syrjälä * 2132a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2133a5e485a9SVille Syrjälä * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2134a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2135a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2136a5e485a9SVille Syrjälä * bits this time around. 2137a5e485a9SVille Syrjälä */ 21384a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 2139a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2140a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 21414a0a0202SVille Syrjälä 21424a0a0202SVille Syrjälä if (gt_iir) 21434a0a0202SVille Syrjälä I915_WRITE(GTIIR, gt_iir); 21444a0a0202SVille Syrjälä if (pm_iir) 21454a0a0202SVille Syrjälä I915_WRITE(GEN6_PMIIR, pm_iir); 21464a0a0202SVille Syrjälä 21477ce4d1f2SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 21481ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 21497ce4d1f2SVille Syrjälä 21503ff60f89SOscar Mateo /* Call regardless, as some status bits might not be 21513ff60f89SOscar Mateo * signalled in iir */ 2152eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 21537ce4d1f2SVille Syrjälä 2154eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2155eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT)) 2156eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2157eef57324SJerome Anand 21587ce4d1f2SVille Syrjälä /* 21597ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 21607ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
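/*
 * Illustrative sketch (not part of i915_irq.c): the "theory on interrupt
 * generation" above, written as a predicate. The CPU interrupt only fires
 * on a 0->1 edge of this expression, which is why the handler zeroes
 * MASTER_INTERRUPT_ENABLE and VLV_IER up front: even if some IIR bits are
 * still set when the enables are restored, the expression has been forced
 * to 0 in between and so produces a fresh edge. The cherryview handler
 * below relies on the same property with GEN8_MASTER_IRQ_CONTROL in place
 * of the VLV master enable. Parameters are hypothetical register snapshots.
 */
static inline bool example_vlv_irq_asserted(u32 vlv_iir, u32 vlv_ier,
					    u32 gt_iir, u32 gt_ier,
					    u32 pm_iir, u32 pm_ier,
					    bool master_enabled)
{
	return (vlv_iir & vlv_ier) ||
	       (((gt_iir & gt_ier) || (pm_iir & pm_ier)) && master_enabled);
}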
21617ce4d1f2SVille Syrjälä */ 21627ce4d1f2SVille Syrjälä if (iir) 21637ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 21644a0a0202SVille Syrjälä 2165a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 21664a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 21671ae3c34cSVille Syrjälä 216852894874SVille Syrjälä if (gt_iir) 2169261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 217052894874SVille Syrjälä if (pm_iir) 217152894874SVille Syrjälä gen6_rps_irq_handler(dev_priv, pm_iir); 217252894874SVille Syrjälä 21731ae3c34cSVille Syrjälä if (hotplug_status) 217491d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 21752ecb8ca4SVille Syrjälä 217691d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 21771e1cace9SVille Syrjälä } while (0); 21787e231dbeSJesse Barnes 21791f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 21801f814dacSImre Deak 21817e231dbeSJesse Barnes return ret; 21827e231dbeSJesse Barnes } 21837e231dbeSJesse Barnes 218443f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg) 218543f328d7SVille Syrjälä { 218645a83f84SDaniel Vetter struct drm_device *dev = arg; 2187fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 218843f328d7SVille Syrjälä irqreturn_t ret = IRQ_NONE; 218943f328d7SVille Syrjälä 21902dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21912dd2a883SImre Deak return IRQ_NONE; 21922dd2a883SImre Deak 21931f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21941f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21951f814dacSImre Deak 2196579de73bSChris Wilson do { 21976e814800SVille Syrjälä u32 master_ctl, iir; 21982ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21991ae3c34cSVille Syrjälä u32 hotplug_status = 0; 2200f0fd96f5SChris Wilson u32 gt_iir[4]; 2201a5e485a9SVille Syrjälä u32 ier = 0; 2202a5e485a9SVille Syrjälä 22038e5fd599SVille Syrjälä master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 22043278f67fSVille Syrjälä iir = I915_READ(VLV_IIR); 22053278f67fSVille Syrjälä 22063278f67fSVille Syrjälä if (master_ctl == 0 && iir == 0) 22078e5fd599SVille Syrjälä break; 220843f328d7SVille Syrjälä 220927b6c122SOscar Mateo ret = IRQ_HANDLED; 221027b6c122SOscar Mateo 2211a5e485a9SVille Syrjälä /* 2212a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2213a5e485a9SVille Syrjälä * 2214a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2215a5e485a9SVille Syrjälä * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2216a5e485a9SVille Syrjälä * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2217a5e485a9SVille Syrjälä * 2218a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2219a5e485a9SVille Syrjälä * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2220a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2221a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2222a5e485a9SVille Syrjälä * bits this time around. 
2223a5e485a9SVille Syrjälä */ 222443f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 2225a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2226a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 222743f328d7SVille Syrjälä 2228e30e251aSVille Syrjälä gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 222927b6c122SOscar Mateo 223027b6c122SOscar Mateo if (iir & I915_DISPLAY_PORT_INTERRUPT) 22311ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 223243f328d7SVille Syrjälä 223327b6c122SOscar Mateo /* Call regardless, as some status bits might not be 223427b6c122SOscar Mateo * signalled in iir */ 2235eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 223643f328d7SVille Syrjälä 2237eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2238eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT | 2239eef57324SJerome Anand I915_LPE_PIPE_C_INTERRUPT)) 2240eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2241eef57324SJerome Anand 22427ce4d1f2SVille Syrjälä /* 22437ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 22447ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 22457ce4d1f2SVille Syrjälä */ 22467ce4d1f2SVille Syrjälä if (iir) 22477ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 22487ce4d1f2SVille Syrjälä 2249a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 2250e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 22511ae3c34cSVille Syrjälä 2252f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2253e30e251aSVille Syrjälä 22541ae3c34cSVille Syrjälä if (hotplug_status) 225591d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 22562ecb8ca4SVille Syrjälä 225791d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2258579de73bSChris Wilson } while (0); 22593278f67fSVille Syrjälä 22601f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 22611f814dacSImre Deak 226243f328d7SVille Syrjälä return ret; 226343f328d7SVille Syrjälä } 226443f328d7SVille Syrjälä 226591d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 226691d14251STvrtko Ursulin u32 hotplug_trigger, 226740e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2268776ad806SJesse Barnes { 226942db67d6SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2270776ad806SJesse Barnes 22716a39d7c9SJani Nikula /* 22726a39d7c9SJani Nikula * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 22736a39d7c9SJani Nikula * unless we touch the hotplug register, even if hotplug_trigger is 22746a39d7c9SJani Nikula * zero. Not acking leads to "The master control interrupt lied (SDE)!" 22756a39d7c9SJani Nikula * errors. 
22766a39d7c9SJani Nikula */ 227713cf5504SDave Airlie dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 22786a39d7c9SJani Nikula if (!hotplug_trigger) { 22796a39d7c9SJani Nikula u32 mask = PORTA_HOTPLUG_STATUS_MASK | 22806a39d7c9SJani Nikula PORTD_HOTPLUG_STATUS_MASK | 22816a39d7c9SJani Nikula PORTC_HOTPLUG_STATUS_MASK | 22826a39d7c9SJani Nikula PORTB_HOTPLUG_STATUS_MASK; 22836a39d7c9SJani Nikula dig_hotplug_reg &= ~mask; 22846a39d7c9SJani Nikula } 22856a39d7c9SJani Nikula 228613cf5504SDave Airlie I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 22876a39d7c9SJani Nikula if (!hotplug_trigger) 22886a39d7c9SJani Nikula return; 228913cf5504SDave Airlie 2290cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 229140e56410SVille Syrjälä dig_hotplug_reg, hpd, 2292fd63e2a9SImre Deak pch_port_hotplug_long_detect); 229340e56410SVille Syrjälä 229491d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2295aaf5ec2eSSonika Jindal } 229691d131d2SDaniel Vetter 229791d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 229840e56410SVille Syrjälä { 229940e56410SVille Syrjälä int pipe; 230040e56410SVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 230140e56410SVille Syrjälä 230291d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 230340e56410SVille Syrjälä 2304cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 2305cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2306776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 2307cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2308cfc33bf7SVille Syrjälä port_name(port)); 2309cfc33bf7SVille Syrjälä } 2310776ad806SJesse Barnes 2311ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 231291d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2313ce99c256SDaniel Vetter 2314776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 231591d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2316776ad806SJesse Barnes 2317776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 2318776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2319776ad806SJesse Barnes 2320776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 2321776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2322776ad806SJesse Barnes 2323776ad806SJesse Barnes if (pch_iir & SDE_POISON) 2324776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 2325776ad806SJesse Barnes 23269db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 2327055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 23289db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 23299db4a9c7SJesse Barnes pipe_name(pipe), 23309db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 2331776ad806SJesse Barnes 2332776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2333776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2334776ad806SJesse Barnes 2335776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2336776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2337776ad806SJesse Barnes 2338776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2339a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 23408664281bSPaulo Zanoni 23418664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2342a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 
23438664281bSPaulo Zanoni } 23448664281bSPaulo Zanoni 234591d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 23468664281bSPaulo Zanoni { 23478664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 23485a69b89fSDaniel Vetter enum pipe pipe; 23498664281bSPaulo Zanoni 2350de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 2351de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2352de032bf4SPaulo Zanoni 2353055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 23541f7247c0SDaniel Vetter if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 23551f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 23568664281bSPaulo Zanoni 23575a69b89fSDaniel Vetter if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 235891d14251STvrtko Ursulin if (IS_IVYBRIDGE(dev_priv)) 235991d14251STvrtko Ursulin ivb_pipe_crc_irq_handler(dev_priv, pipe); 23605a69b89fSDaniel Vetter else 236191d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 23625a69b89fSDaniel Vetter } 23635a69b89fSDaniel Vetter } 23648bf1e9f1SShuang He 23658664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 23668664281bSPaulo Zanoni } 23678664281bSPaulo Zanoni 236891d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 23698664281bSPaulo Zanoni { 23708664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 237145c1cd87SMika Kahola enum pipe pipe; 23728664281bSPaulo Zanoni 2373de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 2374de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 2375de032bf4SPaulo Zanoni 237645c1cd87SMika Kahola for_each_pipe(dev_priv, pipe) 237745c1cd87SMika Kahola if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 237845c1cd87SMika Kahola intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 23798664281bSPaulo Zanoni 23808664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 2381776ad806SJesse Barnes } 2382776ad806SJesse Barnes 238391d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 238423e81d69SAdam Jackson { 238523e81d69SAdam Jackson int pipe; 23866dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2387aaf5ec2eSSonika Jindal 238891d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 238991d131d2SDaniel Vetter 2390cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2391cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 239223e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 2393cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2394cfc33bf7SVille Syrjälä port_name(port)); 2395cfc33bf7SVille Syrjälä } 239623e81d69SAdam Jackson 239723e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 239891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 239923e81d69SAdam Jackson 240023e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 240191d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 240223e81d69SAdam Jackson 240323e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 240423e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 240523e81d69SAdam Jackson 240623e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 240723e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 240823e81d69SAdam Jackson 240923e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 2410055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 241123e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 241223e81d69SAdam Jackson pipe_name(pipe), 
241323e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 24148664281bSPaulo Zanoni 24158664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 241691d14251STvrtko Ursulin cpt_serr_int_handler(dev_priv); 241723e81d69SAdam Jackson } 241823e81d69SAdam Jackson 241931604222SAnusha Srivatsa static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 242031604222SAnusha Srivatsa { 242131604222SAnusha Srivatsa u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 242231604222SAnusha Srivatsa u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 242331604222SAnusha Srivatsa u32 pin_mask = 0, long_mask = 0; 242431604222SAnusha Srivatsa 242531604222SAnusha Srivatsa if (ddi_hotplug_trigger) { 242631604222SAnusha Srivatsa u32 dig_hotplug_reg; 242731604222SAnusha Srivatsa 242831604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 242931604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 243031604222SAnusha Srivatsa 243131604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 243231604222SAnusha Srivatsa ddi_hotplug_trigger, 243331604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 243431604222SAnusha Srivatsa icp_ddi_port_hotplug_long_detect); 243531604222SAnusha Srivatsa } 243631604222SAnusha Srivatsa 243731604222SAnusha Srivatsa if (tc_hotplug_trigger) { 243831604222SAnusha Srivatsa u32 dig_hotplug_reg; 243931604222SAnusha Srivatsa 244031604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 244131604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 244231604222SAnusha Srivatsa 244331604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 244431604222SAnusha Srivatsa tc_hotplug_trigger, 244531604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 244631604222SAnusha Srivatsa icp_tc_port_hotplug_long_detect); 244731604222SAnusha Srivatsa } 244831604222SAnusha Srivatsa 244931604222SAnusha Srivatsa if (pin_mask) 245031604222SAnusha Srivatsa intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 245131604222SAnusha Srivatsa 245231604222SAnusha Srivatsa if (pch_iir & SDE_GMBUS_ICP) 245331604222SAnusha Srivatsa gmbus_irq_handler(dev_priv); 245431604222SAnusha Srivatsa } 245531604222SAnusha Srivatsa 245691d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 24576dbf30ceSVille Syrjälä { 24586dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 24596dbf30ceSVille Syrjälä ~SDE_PORTE_HOTPLUG_SPT; 24606dbf30ceSVille Syrjälä u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 24616dbf30ceSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 24626dbf30ceSVille Syrjälä 24636dbf30ceSVille Syrjälä if (hotplug_trigger) { 24646dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24656dbf30ceSVille Syrjälä 24666dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 24676dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 24686dbf30ceSVille Syrjälä 2469cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2470cf53902fSRodrigo Vivi hotplug_trigger, dig_hotplug_reg, hpd_spt, 247174c0b395SVille Syrjälä spt_port_hotplug_long_detect); 24726dbf30ceSVille Syrjälä } 24736dbf30ceSVille Syrjälä 24746dbf30ceSVille Syrjälä if (hotplug2_trigger) { 24756dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24766dbf30ceSVille Syrjälä 24776dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 24786dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 24796dbf30ceSVille Syrjälä 2480cf53902fSRodrigo Vivi 
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2481cf53902fSRodrigo Vivi hotplug2_trigger, dig_hotplug_reg, hpd_spt, 24826dbf30ceSVille Syrjälä spt_port_hotplug2_long_detect); 24836dbf30ceSVille Syrjälä } 24846dbf30ceSVille Syrjälä 24856dbf30ceSVille Syrjälä if (pin_mask) 248691d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 24876dbf30ceSVille Syrjälä 24886dbf30ceSVille Syrjälä if (pch_iir & SDE_GMBUS_CPT) 248991d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 24906dbf30ceSVille Syrjälä } 24916dbf30ceSVille Syrjälä 249291d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 249391d14251STvrtko Ursulin u32 hotplug_trigger, 249440e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2495c008bc6eSPaulo Zanoni { 2496e4ce95aaSVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2497e4ce95aaSVille Syrjälä 2498e4ce95aaSVille Syrjälä dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2499e4ce95aaSVille Syrjälä I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2500e4ce95aaSVille Syrjälä 2501cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 250240e56410SVille Syrjälä dig_hotplug_reg, hpd, 2503e4ce95aaSVille Syrjälä ilk_port_hotplug_long_detect); 250440e56410SVille Syrjälä 250591d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2506e4ce95aaSVille Syrjälä } 2507c008bc6eSPaulo Zanoni 250891d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 250991d14251STvrtko Ursulin u32 de_iir) 251040e56410SVille Syrjälä { 251140e56410SVille Syrjälä enum pipe pipe; 251240e56410SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 251340e56410SVille Syrjälä 251440e56410SVille Syrjälä if (hotplug_trigger) 251591d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 251640e56410SVille Syrjälä 2517c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 251891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2519c008bc6eSPaulo Zanoni 2520c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 252191d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2522c008bc6eSPaulo Zanoni 2523c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 2524c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2525c008bc6eSPaulo Zanoni 2526055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2527fd3a4024SDaniel Vetter if (de_iir & DE_PIPE_VBLANK(pipe)) 2528fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2529c008bc6eSPaulo Zanoni 253040da17c2SDaniel Vetter if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 25311f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2532c008bc6eSPaulo Zanoni 253340da17c2SDaniel Vetter if (de_iir & DE_PIPE_CRC_DONE(pipe)) 253491d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2535c008bc6eSPaulo Zanoni } 2536c008bc6eSPaulo Zanoni 2537c008bc6eSPaulo Zanoni /* check event from PCH */ 2538c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 2539c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 2540c008bc6eSPaulo Zanoni 254191d14251STvrtko Ursulin if (HAS_PCH_CPT(dev_priv)) 254291d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 2543c008bc6eSPaulo Zanoni else 254491d14251STvrtko Ursulin ibx_irq_handler(dev_priv, pch_iir); 2545c008bc6eSPaulo Zanoni 2546c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 2547c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 2548c008bc6eSPaulo Zanoni } 2549c008bc6eSPaulo Zanoni 
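	/*
	 * Only Ironlake (gen5) acts on the PCU event below; it feeds the
	 * GPU frequency change handling in ironlake_rps_change_irq_handler().
	 */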
255091d14251STvrtko Ursulin if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 255191d14251STvrtko Ursulin ironlake_rps_change_irq_handler(dev_priv); 2552c008bc6eSPaulo Zanoni } 2553c008bc6eSPaulo Zanoni 255491d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 255591d14251STvrtko Ursulin u32 de_iir) 25569719fb98SPaulo Zanoni { 255707d27e20SDamien Lespiau enum pipe pipe; 255823bb4cb5SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 255923bb4cb5SVille Syrjälä 256040e56410SVille Syrjälä if (hotplug_trigger) 256191d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 25629719fb98SPaulo Zanoni 25639719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 256491d14251STvrtko Ursulin ivb_err_int_handler(dev_priv); 25659719fb98SPaulo Zanoni 256654fd3149SDhinakaran Pandiyan if (de_iir & DE_EDP_PSR_INT_HSW) { 256754fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 256854fd3149SDhinakaran Pandiyan 256954fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 257054fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 257154fd3149SDhinakaran Pandiyan } 2572fc340442SDaniel Vetter 25739719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 257491d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 25759719fb98SPaulo Zanoni 25769719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 257791d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 25789719fb98SPaulo Zanoni 2579055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2580fd3a4024SDaniel Vetter if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2581fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 25829719fb98SPaulo Zanoni } 25839719fb98SPaulo Zanoni 25849719fb98SPaulo Zanoni /* check event from PCH */ 258591d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 25869719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 25879719fb98SPaulo Zanoni 258891d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 25899719fb98SPaulo Zanoni 25909719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 25919719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 25929719fb98SPaulo Zanoni } 25939719fb98SPaulo Zanoni } 25949719fb98SPaulo Zanoni 259572c90f62SOscar Mateo /* 259672c90f62SOscar Mateo * To handle irqs with the minimum potential races with fresh interrupts, we: 259772c90f62SOscar Mateo * 1 - Disable Master Interrupt Control. 259872c90f62SOscar Mateo * 2 - Find the source(s) of the interrupt. 259972c90f62SOscar Mateo * 3 - Clear the Interrupt Identity bits (IIR). 260072c90f62SOscar Mateo * 4 - Process the interrupt(s) that had bits set in the IIRs. 260172c90f62SOscar Mateo * 5 - Re-enable Master Interrupt Control. 
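 *
 * Clearing the IIR bits before processing them means that any new event
 * arriving while the current batch is being handled simply re-asserts its
 * IIR bit, and re-enabling Master Interrupt Control in step 5 then raises
 * a fresh CPU interrupt, so no events are lost.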
260272c90f62SOscar Mateo */ 2603f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2604b1f14ad0SJesse Barnes { 260545a83f84SDaniel Vetter struct drm_device *dev = arg; 2606fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2607f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 26080e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 2609b1f14ad0SJesse Barnes 26102dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 26112dd2a883SImre Deak return IRQ_NONE; 26122dd2a883SImre Deak 26131f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26141f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 26151f814dacSImre Deak 2616b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 2617b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 2618b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 26190e43406bSChris Wilson 262044498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 262144498aeaSPaulo Zanoni * interrupts will will be stored on its back queue, and then we'll be 262244498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 262344498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 262444498aeaSPaulo Zanoni * due to its back queue). */ 262591d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv)) { 262644498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 262744498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 2628ab5c608bSBen Widawsky } 262944498aeaSPaulo Zanoni 263072c90f62SOscar Mateo /* Find, clear, then process each source of interrupt */ 263172c90f62SOscar Mateo 26320e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 26330e43406bSChris Wilson if (gt_iir) { 263472c90f62SOscar Mateo I915_WRITE(GTIIR, gt_iir); 263572c90f62SOscar Mateo ret = IRQ_HANDLED; 263691d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 2637261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 2638d8fc8a47SPaulo Zanoni else 2639261e40b8SVille Syrjälä ilk_gt_irq_handler(dev_priv, gt_iir); 26400e43406bSChris Wilson } 2641b1f14ad0SJesse Barnes 2642b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 26430e43406bSChris Wilson if (de_iir) { 264472c90f62SOscar Mateo I915_WRITE(DEIIR, de_iir); 264572c90f62SOscar Mateo ret = IRQ_HANDLED; 264691d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) 264791d14251STvrtko Ursulin ivb_display_irq_handler(dev_priv, de_iir); 2648f1af8fc1SPaulo Zanoni else 264991d14251STvrtko Ursulin ilk_display_irq_handler(dev_priv, de_iir); 26500e43406bSChris Wilson } 26510e43406bSChris Wilson 265291d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 2653f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 26540e43406bSChris Wilson if (pm_iir) { 2655b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 26560e43406bSChris Wilson ret = IRQ_HANDLED; 265772c90f62SOscar Mateo gen6_rps_irq_handler(dev_priv, pm_iir); 26580e43406bSChris Wilson } 2659f1af8fc1SPaulo Zanoni } 2660b1f14ad0SJesse Barnes 2661b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 266274093f3eSChris Wilson if (!HAS_PCH_NOP(dev_priv)) 266344498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 2664b1f14ad0SJesse Barnes 26651f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26661f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 26671f814dacSImre Deak 2668b1f14ad0SJesse Barnes return ret; 2669b1f14ad0SJesse Barnes } 2670b1f14ad0SJesse Barnes 267191d14251STvrtko 
Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 267291d14251STvrtko Ursulin u32 hotplug_trigger, 267340e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2674d04a492dSShashank Sharma { 2675cebd87a0SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2676d04a492dSShashank Sharma 2677a52bb15bSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2678a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2679d04a492dSShashank Sharma 2680cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 268140e56410SVille Syrjälä dig_hotplug_reg, hpd, 2682cebd87a0SVille Syrjälä bxt_port_hotplug_long_detect); 268340e56410SVille Syrjälä 268491d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2685d04a492dSShashank Sharma } 2686d04a492dSShashank Sharma 2687121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2688121e758eSDhinakaran Pandiyan { 2689121e758eSDhinakaran Pandiyan u32 pin_mask = 0, long_mask = 0; 2690b796b971SDhinakaran Pandiyan u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2691b796b971SDhinakaran Pandiyan u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2692121e758eSDhinakaran Pandiyan 2693121e758eSDhinakaran Pandiyan if (trigger_tc) { 2694b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2695b796b971SDhinakaran Pandiyan 2696121e758eSDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2697121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2698121e758eSDhinakaran Pandiyan 2699121e758eSDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2700b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2701121e758eSDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2702121e758eSDhinakaran Pandiyan } 2703b796b971SDhinakaran Pandiyan 2704b796b971SDhinakaran Pandiyan if (trigger_tbt) { 2705b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2706b796b971SDhinakaran Pandiyan 2707b796b971SDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2708b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2709b796b971SDhinakaran Pandiyan 2710b796b971SDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2711b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2712b796b971SDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2713b796b971SDhinakaran Pandiyan } 2714b796b971SDhinakaran Pandiyan 2715b796b971SDhinakaran Pandiyan if (pin_mask) 2716b796b971SDhinakaran Pandiyan intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2717b796b971SDhinakaran Pandiyan else 2718b796b971SDhinakaran Pandiyan DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2719121e758eSDhinakaran Pandiyan } 2720121e758eSDhinakaran Pandiyan 2721f11a0f46STvrtko Ursulin static irqreturn_t 2722f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2723abd58f01SBen Widawsky { 2724abd58f01SBen Widawsky irqreturn_t ret = IRQ_NONE; 2725f11a0f46STvrtko Ursulin u32 iir; 2726c42664ccSDaniel Vetter enum pipe pipe; 272788e04703SJesse Barnes 2728abd58f01SBen Widawsky if (master_ctl & GEN8_DE_MISC_IRQ) { 2729e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_MISC_IIR); 2730e32192e1STvrtko Ursulin if (iir) { 2731e04f7eceSVille Syrjälä bool found = false; 2732e04f7eceSVille Syrjälä 2733e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_MISC_IIR, iir); 2734abd58f01SBen Widawsky ret = IRQ_HANDLED; 
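		/*
		 * Only the opregion GSE event and the eDP PSR event are expected
		 * in the DE_MISC IIR; anything else is reported as an unexpected
		 * DE Misc interrupt below.
		 */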
2735e04f7eceSVille Syrjälä 2736e04f7eceSVille Syrjälä if (iir & GEN8_DE_MISC_GSE) { 273791d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2738e04f7eceSVille Syrjälä found = true; 2739e04f7eceSVille Syrjälä } 2740e04f7eceSVille Syrjälä 2741e04f7eceSVille Syrjälä if (iir & GEN8_DE_EDP_PSR) { 274254fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 274354fd3149SDhinakaran Pandiyan 274454fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 274554fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 2746e04f7eceSVille Syrjälä found = true; 2747e04f7eceSVille Syrjälä } 2748e04f7eceSVille Syrjälä 2749e04f7eceSVille Syrjälä if (!found) 275038cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Misc interrupt\n"); 2751abd58f01SBen Widawsky } 275238cc46d7SOscar Mateo else 275338cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2754abd58f01SBen Widawsky } 2755abd58f01SBen Widawsky 2756121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2757121e758eSDhinakaran Pandiyan iir = I915_READ(GEN11_DE_HPD_IIR); 2758121e758eSDhinakaran Pandiyan if (iir) { 2759121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IIR, iir); 2760121e758eSDhinakaran Pandiyan ret = IRQ_HANDLED; 2761121e758eSDhinakaran Pandiyan gen11_hpd_irq_handler(dev_priv, iir); 2762121e758eSDhinakaran Pandiyan } else { 2763121e758eSDhinakaran Pandiyan DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2764121e758eSDhinakaran Pandiyan } 2765121e758eSDhinakaran Pandiyan } 2766121e758eSDhinakaran Pandiyan 27676d766f02SDaniel Vetter if (master_ctl & GEN8_DE_PORT_IRQ) { 2768e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PORT_IIR); 2769e32192e1STvrtko Ursulin if (iir) { 2770e32192e1STvrtko Ursulin u32 tmp_mask; 2771d04a492dSShashank Sharma bool found = false; 2772cebd87a0SVille Syrjälä 2773e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PORT_IIR, iir); 27746d766f02SDaniel Vetter ret = IRQ_HANDLED; 277588e04703SJesse Barnes 2776e32192e1STvrtko Ursulin tmp_mask = GEN8_AUX_CHANNEL_A; 2777bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2778e32192e1STvrtko Ursulin tmp_mask |= GEN9_AUX_CHANNEL_B | 2779e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_C | 2780e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_D; 2781e32192e1STvrtko Ursulin 2782bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 2783bb187e93SJames Ausmus tmp_mask |= ICL_AUX_CHANNEL_E; 2784bb187e93SJames Ausmus 27859bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || 27869bb635d9SDhinakaran Pandiyan INTEL_GEN(dev_priv) >= 11) 2787a324fcacSRodrigo Vivi tmp_mask |= CNL_AUX_CHANNEL_F; 2788a324fcacSRodrigo Vivi 2789e32192e1STvrtko Ursulin if (iir & tmp_mask) { 279091d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2791d04a492dSShashank Sharma found = true; 2792d04a492dSShashank Sharma } 2793d04a492dSShashank Sharma 2794cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) { 2795e32192e1STvrtko Ursulin tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2796e32192e1STvrtko Ursulin if (tmp_mask) { 279791d14251STvrtko Ursulin bxt_hpd_irq_handler(dev_priv, tmp_mask, 279891d14251STvrtko Ursulin hpd_bxt); 2799d04a492dSShashank Sharma found = true; 2800d04a492dSShashank Sharma } 2801e32192e1STvrtko Ursulin } else if (IS_BROADWELL(dev_priv)) { 2802e32192e1STvrtko Ursulin tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2803e32192e1STvrtko Ursulin if (tmp_mask) { 280491d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, 280591d14251STvrtko Ursulin tmp_mask, hpd_bdw); 2806e32192e1STvrtko 
Ursulin found = true; 2807e32192e1STvrtko Ursulin } 2808e32192e1STvrtko Ursulin } 2809d04a492dSShashank Sharma 2810cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 281191d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 28129e63743eSShashank Sharma found = true; 28139e63743eSShashank Sharma } 28149e63743eSShashank Sharma 2815d04a492dSShashank Sharma if (!found) 281638cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Port interrupt\n"); 28176d766f02SDaniel Vetter } 281838cc46d7SOscar Mateo else 281938cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 28206d766f02SDaniel Vetter } 28216d766f02SDaniel Vetter 2822055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2823fd3a4024SDaniel Vetter u32 fault_errors; 2824abd58f01SBen Widawsky 2825c42664ccSDaniel Vetter if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2826c42664ccSDaniel Vetter continue; 2827c42664ccSDaniel Vetter 2828e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2829e32192e1STvrtko Ursulin if (!iir) { 2830e32192e1STvrtko Ursulin DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2831e32192e1STvrtko Ursulin continue; 2832e32192e1STvrtko Ursulin } 2833770de83dSDamien Lespiau 2834e32192e1STvrtko Ursulin ret = IRQ_HANDLED; 2835e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2836e32192e1STvrtko Ursulin 2837fd3a4024SDaniel Vetter if (iir & GEN8_PIPE_VBLANK) 2838fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2839abd58f01SBen Widawsky 2840e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 284191d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 28420fbe7870SDaniel Vetter 2843e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2844e32192e1STvrtko Ursulin intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 284538d83c96SDaniel Vetter 2846e32192e1STvrtko Ursulin fault_errors = iir; 2847bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2848e32192e1STvrtko Ursulin fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2849770de83dSDamien Lespiau else 2850e32192e1STvrtko Ursulin fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2851770de83dSDamien Lespiau 2852770de83dSDamien Lespiau if (fault_errors) 28531353ec38STvrtko Ursulin DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 285430100f2bSDaniel Vetter pipe_name(pipe), 2855e32192e1STvrtko Ursulin fault_errors); 2856abd58f01SBen Widawsky } 2857abd58f01SBen Widawsky 285891d14251STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2859266ea3d9SShashank Sharma master_ctl & GEN8_DE_PCH_IRQ) { 286092d03a80SDaniel Vetter /* 286192d03a80SDaniel Vetter * FIXME(BDW): Assume for now that the new interrupt handling 286292d03a80SDaniel Vetter * scheme also closed the SDE interrupt handling race we've seen 286392d03a80SDaniel Vetter * on older pch-split platforms. But this needs testing. 
286492d03a80SDaniel Vetter */ 2865e32192e1STvrtko Ursulin iir = I915_READ(SDEIIR); 2866e32192e1STvrtko Ursulin if (iir) { 2867e32192e1STvrtko Ursulin I915_WRITE(SDEIIR, iir); 286892d03a80SDaniel Vetter ret = IRQ_HANDLED; 28696dbf30ceSVille Syrjälä 287031604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 287131604222SAnusha Srivatsa icp_irq_handler(dev_priv, iir); 287231604222SAnusha Srivatsa else if (HAS_PCH_SPT(dev_priv) || 287331604222SAnusha Srivatsa HAS_PCH_KBP(dev_priv) || 28747b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 287591d14251STvrtko Ursulin spt_irq_handler(dev_priv, iir); 28766dbf30ceSVille Syrjälä else 287791d14251STvrtko Ursulin cpt_irq_handler(dev_priv, iir); 28782dfb0b81SJani Nikula } else { 28792dfb0b81SJani Nikula /* 28802dfb0b81SJani Nikula * Like on previous PCH there seems to be something 28812dfb0b81SJani Nikula * fishy going on with forwarding PCH interrupts. 28822dfb0b81SJani Nikula */ 28832dfb0b81SJani Nikula DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 28842dfb0b81SJani Nikula } 288592d03a80SDaniel Vetter } 288692d03a80SDaniel Vetter 2887f11a0f46STvrtko Ursulin return ret; 2888f11a0f46STvrtko Ursulin } 2889f11a0f46STvrtko Ursulin 2890f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg) 2891f11a0f46STvrtko Ursulin { 2892f0fd96f5SChris Wilson struct drm_i915_private *dev_priv = to_i915(arg); 2893f11a0f46STvrtko Ursulin u32 master_ctl; 2894f0fd96f5SChris Wilson u32 gt_iir[4]; 2895f11a0f46STvrtko Ursulin 2896f11a0f46STvrtko Ursulin if (!intel_irqs_enabled(dev_priv)) 2897f11a0f46STvrtko Ursulin return IRQ_NONE; 2898f11a0f46STvrtko Ursulin 2899f11a0f46STvrtko Ursulin master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2900f11a0f46STvrtko Ursulin master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2901f11a0f46STvrtko Ursulin if (!master_ctl) 2902f11a0f46STvrtko Ursulin return IRQ_NONE; 2903f11a0f46STvrtko Ursulin 2904f11a0f46STvrtko Ursulin I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2905f11a0f46STvrtko Ursulin 2906f11a0f46STvrtko Ursulin /* Find, clear, then process each source of interrupt */ 290755ef72f2SChris Wilson gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2908f0fd96f5SChris Wilson 2909f0fd96f5SChris Wilson /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2910f0fd96f5SChris Wilson if (master_ctl & ~GEN8_GT_IRQS) { 2911f0fd96f5SChris Wilson disable_rpm_wakeref_asserts(dev_priv); 291255ef72f2SChris Wilson gen8_de_irq_handler(dev_priv, master_ctl); 2913f0fd96f5SChris Wilson enable_rpm_wakeref_asserts(dev_priv); 2914f0fd96f5SChris Wilson } 2915f11a0f46STvrtko Ursulin 2916cb0d205eSChris Wilson I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2917abd58f01SBen Widawsky 2918f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 29191f814dacSImre Deak 292055ef72f2SChris Wilson return IRQ_HANDLED; 2921abd58f01SBen Widawsky } 2922abd58f01SBen Widawsky 292336703e79SChris Wilson struct wedge_me { 292436703e79SChris Wilson struct delayed_work work; 292536703e79SChris Wilson struct drm_i915_private *i915; 292636703e79SChris Wilson const char *name; 292736703e79SChris Wilson }; 292836703e79SChris Wilson 292936703e79SChris Wilson static void wedge_me(struct work_struct *work) 293036703e79SChris Wilson { 293136703e79SChris Wilson struct wedge_me *w = container_of(work, typeof(*w), work.work); 293236703e79SChris Wilson 293336703e79SChris Wilson dev_err(w->i915->drm.dev, 293436703e79SChris Wilson "%s timed out, cancelling all in-flight rendering.\n", 293536703e79SChris Wilson w->name); 293636703e79SChris Wilson 
i915_gem_set_wedged(w->i915); 293736703e79SChris Wilson } 293836703e79SChris Wilson 293936703e79SChris Wilson static void __init_wedge(struct wedge_me *w, 294036703e79SChris Wilson struct drm_i915_private *i915, 294136703e79SChris Wilson long timeout, 294236703e79SChris Wilson const char *name) 294336703e79SChris Wilson { 294436703e79SChris Wilson w->i915 = i915; 294536703e79SChris Wilson w->name = name; 294636703e79SChris Wilson 294736703e79SChris Wilson INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 294836703e79SChris Wilson schedule_delayed_work(&w->work, timeout); 294936703e79SChris Wilson } 295036703e79SChris Wilson 295136703e79SChris Wilson static void __fini_wedge(struct wedge_me *w) 295236703e79SChris Wilson { 295336703e79SChris Wilson cancel_delayed_work_sync(&w->work); 295436703e79SChris Wilson destroy_delayed_work_on_stack(&w->work); 295536703e79SChris Wilson w->i915 = NULL; 295636703e79SChris Wilson } 295736703e79SChris Wilson 295836703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 295936703e79SChris Wilson for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 296036703e79SChris Wilson (W)->i915; \ 296136703e79SChris Wilson __fini_wedge((W))) 296236703e79SChris Wilson 296351951ae7SMika Kuoppala static u32 2964f744dbc2SMika Kuoppala gen11_gt_engine_identity(struct drm_i915_private * const i915, 296551951ae7SMika Kuoppala const unsigned int bank, const unsigned int bit) 296651951ae7SMika Kuoppala { 296751951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 296851951ae7SMika Kuoppala u32 timeout_ts; 296951951ae7SMika Kuoppala u32 ident; 297051951ae7SMika Kuoppala 297196606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 297296606f3bSOscar Mateo 297351951ae7SMika Kuoppala raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 297451951ae7SMika Kuoppala 297551951ae7SMika Kuoppala /* 297651951ae7SMika Kuoppala * NB: Specs do not specify how long to spin wait, 297751951ae7SMika Kuoppala * so we do ~100us as an educated guess. 
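 *
 * local_clock() counts in nanoseconds; shifting right by 10 divides by
 * 1024 and so roughly converts to microseconds, which is why adding 100
 * below places the deadline about 100us ahead.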
297851951ae7SMika Kuoppala */ 297951951ae7SMika Kuoppala timeout_ts = (local_clock() >> 10) + 100; 298051951ae7SMika Kuoppala do { 298151951ae7SMika Kuoppala ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 298251951ae7SMika Kuoppala } while (!(ident & GEN11_INTR_DATA_VALID) && 298351951ae7SMika Kuoppala !time_after32(local_clock() >> 10, timeout_ts)); 298451951ae7SMika Kuoppala 298551951ae7SMika Kuoppala if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 298651951ae7SMika Kuoppala DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 298751951ae7SMika Kuoppala bank, bit, ident); 298851951ae7SMika Kuoppala return 0; 298951951ae7SMika Kuoppala } 299051951ae7SMika Kuoppala 299151951ae7SMika Kuoppala raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 299251951ae7SMika Kuoppala GEN11_INTR_DATA_VALID); 299351951ae7SMika Kuoppala 2994f744dbc2SMika Kuoppala return ident; 2995f744dbc2SMika Kuoppala } 2996f744dbc2SMika Kuoppala 2997f744dbc2SMika Kuoppala static void 2998f744dbc2SMika Kuoppala gen11_other_irq_handler(struct drm_i915_private * const i915, 2999f744dbc2SMika Kuoppala const u8 instance, const u16 iir) 3000f744dbc2SMika Kuoppala { 3001d02b98b8SOscar Mateo if (instance == OTHER_GTPM_INSTANCE) 3002d02b98b8SOscar Mateo return gen6_rps_irq_handler(i915, iir); 3003d02b98b8SOscar Mateo 3004f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3005f744dbc2SMika Kuoppala instance, iir); 3006f744dbc2SMika Kuoppala } 3007f744dbc2SMika Kuoppala 3008f744dbc2SMika Kuoppala static void 3009f744dbc2SMika Kuoppala gen11_engine_irq_handler(struct drm_i915_private * const i915, 3010f744dbc2SMika Kuoppala const u8 class, const u8 instance, const u16 iir) 3011f744dbc2SMika Kuoppala { 3012f744dbc2SMika Kuoppala struct intel_engine_cs *engine; 3013f744dbc2SMika Kuoppala 3014f744dbc2SMika Kuoppala if (instance <= MAX_ENGINE_INSTANCE) 3015f744dbc2SMika Kuoppala engine = i915->engine_class[class][instance]; 3016f744dbc2SMika Kuoppala else 3017f744dbc2SMika Kuoppala engine = NULL; 3018f744dbc2SMika Kuoppala 3019f744dbc2SMika Kuoppala if (likely(engine)) 3020f744dbc2SMika Kuoppala return gen8_cs_irq_handler(engine, iir); 3021f744dbc2SMika Kuoppala 3022f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3023f744dbc2SMika Kuoppala class, instance); 3024f744dbc2SMika Kuoppala } 3025f744dbc2SMika Kuoppala 3026f744dbc2SMika Kuoppala static void 3027f744dbc2SMika Kuoppala gen11_gt_identity_handler(struct drm_i915_private * const i915, 3028f744dbc2SMika Kuoppala const u32 identity) 3029f744dbc2SMika Kuoppala { 3030f744dbc2SMika Kuoppala const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3031f744dbc2SMika Kuoppala const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3032f744dbc2SMika Kuoppala const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3033f744dbc2SMika Kuoppala 3034f744dbc2SMika Kuoppala if (unlikely(!intr)) 3035f744dbc2SMika Kuoppala return; 3036f744dbc2SMika Kuoppala 3037f744dbc2SMika Kuoppala if (class <= COPY_ENGINE_CLASS) 3038f744dbc2SMika Kuoppala return gen11_engine_irq_handler(i915, class, instance, intr); 3039f744dbc2SMika Kuoppala 3040f744dbc2SMika Kuoppala if (class == OTHER_CLASS) 3041f744dbc2SMika Kuoppala return gen11_other_irq_handler(i915, instance, intr); 3042f744dbc2SMika Kuoppala 3043f744dbc2SMika Kuoppala WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3044f744dbc2SMika Kuoppala class, instance, intr); 304551951ae7SMika Kuoppala } 304651951ae7SMika Kuoppala 304751951ae7SMika 
Kuoppala static void 304896606f3bSOscar Mateo gen11_gt_bank_handler(struct drm_i915_private * const i915, 304996606f3bSOscar Mateo const unsigned int bank) 305051951ae7SMika Kuoppala { 305151951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 305251951ae7SMika Kuoppala unsigned long intr_dw; 305351951ae7SMika Kuoppala unsigned int bit; 305451951ae7SMika Kuoppala 305596606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 305651951ae7SMika Kuoppala 305751951ae7SMika Kuoppala intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 305851951ae7SMika Kuoppala 305951951ae7SMika Kuoppala if (unlikely(!intr_dw)) { 306051951ae7SMika Kuoppala DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 306196606f3bSOscar Mateo return; 306251951ae7SMika Kuoppala } 306351951ae7SMika Kuoppala 306451951ae7SMika Kuoppala for_each_set_bit(bit, &intr_dw, 32) { 3065f744dbc2SMika Kuoppala const u32 ident = gen11_gt_engine_identity(i915, 3066f744dbc2SMika Kuoppala bank, bit); 306751951ae7SMika Kuoppala 3068f744dbc2SMika Kuoppala gen11_gt_identity_handler(i915, ident); 306951951ae7SMika Kuoppala } 307051951ae7SMika Kuoppala 307151951ae7SMika Kuoppala /* Clear must be after shared has been served for engine */ 307251951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 307351951ae7SMika Kuoppala } 307496606f3bSOscar Mateo 307596606f3bSOscar Mateo static void 307696606f3bSOscar Mateo gen11_gt_irq_handler(struct drm_i915_private * const i915, 307796606f3bSOscar Mateo const u32 master_ctl) 307896606f3bSOscar Mateo { 307996606f3bSOscar Mateo unsigned int bank; 308096606f3bSOscar Mateo 308196606f3bSOscar Mateo spin_lock(&i915->irq_lock); 308296606f3bSOscar Mateo 308396606f3bSOscar Mateo for (bank = 0; bank < 2; bank++) { 308496606f3bSOscar Mateo if (master_ctl & GEN11_GT_DW_IRQ(bank)) 308596606f3bSOscar Mateo gen11_gt_bank_handler(i915, bank); 308696606f3bSOscar Mateo } 308796606f3bSOscar Mateo 308896606f3bSOscar Mateo spin_unlock(&i915->irq_lock); 308951951ae7SMika Kuoppala } 309051951ae7SMika Kuoppala 3091df0d28c1SDhinakaran Pandiyan static void 3092df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, 3093df0d28c1SDhinakaran Pandiyan u32 *iir) 3094df0d28c1SDhinakaran Pandiyan { 3095df0d28c1SDhinakaran Pandiyan void __iomem * const regs = dev_priv->regs; 3096df0d28c1SDhinakaran Pandiyan 3097df0d28c1SDhinakaran Pandiyan if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3098df0d28c1SDhinakaran Pandiyan return; 3099df0d28c1SDhinakaran Pandiyan 3100df0d28c1SDhinakaran Pandiyan *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3101df0d28c1SDhinakaran Pandiyan if (likely(*iir)) 3102df0d28c1SDhinakaran Pandiyan raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir); 3103df0d28c1SDhinakaran Pandiyan } 3104df0d28c1SDhinakaran Pandiyan 3105df0d28c1SDhinakaran Pandiyan static void 3106df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, 3107df0d28c1SDhinakaran Pandiyan const u32 master_ctl, const u32 iir) 3108df0d28c1SDhinakaran Pandiyan { 3109df0d28c1SDhinakaran Pandiyan if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3110df0d28c1SDhinakaran Pandiyan return; 3111df0d28c1SDhinakaran Pandiyan 3112df0d28c1SDhinakaran Pandiyan if (unlikely(!iir)) { 3113df0d28c1SDhinakaran Pandiyan DRM_ERROR("GU_MISC iir blank!\n"); 3114df0d28c1SDhinakaran Pandiyan return; 3115df0d28c1SDhinakaran Pandiyan } 3116df0d28c1SDhinakaran Pandiyan 3117df0d28c1SDhinakaran Pandiyan if (iir & GEN11_GU_MISC_GSE) 3118df0d28c1SDhinakaran Pandiyan intel_opregion_asle_intr(dev_priv); 
3119df0d28c1SDhinakaran Pandiyan else 3120df0d28c1SDhinakaran Pandiyan DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir); 3121df0d28c1SDhinakaran Pandiyan } 3122df0d28c1SDhinakaran Pandiyan 312351951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg) 312451951ae7SMika Kuoppala { 312551951ae7SMika Kuoppala struct drm_i915_private * const i915 = to_i915(arg); 312651951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 312751951ae7SMika Kuoppala u32 master_ctl; 3128df0d28c1SDhinakaran Pandiyan u32 gu_misc_iir; 312951951ae7SMika Kuoppala 313051951ae7SMika Kuoppala if (!intel_irqs_enabled(i915)) 313151951ae7SMika Kuoppala return IRQ_NONE; 313251951ae7SMika Kuoppala 313351951ae7SMika Kuoppala master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 313451951ae7SMika Kuoppala master_ctl &= ~GEN11_MASTER_IRQ; 313551951ae7SMika Kuoppala if (!master_ctl) 313651951ae7SMika Kuoppala return IRQ_NONE; 313751951ae7SMika Kuoppala 313851951ae7SMika Kuoppala /* Disable interrupts. */ 313951951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 314051951ae7SMika Kuoppala 314151951ae7SMika Kuoppala /* Find, clear, then process each source of interrupt. */ 314251951ae7SMika Kuoppala gen11_gt_irq_handler(i915, master_ctl); 314351951ae7SMika Kuoppala 314451951ae7SMika Kuoppala /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 314551951ae7SMika Kuoppala if (master_ctl & GEN11_DISPLAY_IRQ) { 314651951ae7SMika Kuoppala const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 314751951ae7SMika Kuoppala 314851951ae7SMika Kuoppala disable_rpm_wakeref_asserts(i915); 314951951ae7SMika Kuoppala /* 315051951ae7SMika Kuoppala * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 315151951ae7SMika Kuoppala * for the display related bits. 315251951ae7SMika Kuoppala */ 315351951ae7SMika Kuoppala gen8_de_irq_handler(i915, disp_ctl); 315451951ae7SMika Kuoppala enable_rpm_wakeref_asserts(i915); 315551951ae7SMika Kuoppala } 315651951ae7SMika Kuoppala 3157df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); 3158df0d28c1SDhinakaran Pandiyan 315951951ae7SMika Kuoppala /* Acknowledge and enable interrupts. 
*/ 316051951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 316151951ae7SMika Kuoppala 3162df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); 3163df0d28c1SDhinakaran Pandiyan 316451951ae7SMika Kuoppala return IRQ_HANDLED; 316551951ae7SMika Kuoppala } 316651951ae7SMika Kuoppala 3167ce800754SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv, 3168d0667e9cSChris Wilson u32 engine_mask, 3169d0667e9cSChris Wilson const char *reason) 31708a905236SJesse Barnes { 3171ce800754SChris Wilson struct i915_gpu_error *error = &dev_priv->gpu_error; 317291c8a326SChris Wilson struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3173cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3174cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3175cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 317636703e79SChris Wilson struct wedge_me w; 31778a905236SJesse Barnes 3178c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 31798a905236SJesse Barnes 318044d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 3181c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 31821f83fee0SDaniel Vetter 318336703e79SChris Wilson /* Use a watchdog to ensure that our reset completes */ 318436703e79SChris Wilson i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3185c033666aSChris Wilson intel_prepare_reset(dev_priv); 31867514747dSVille Syrjälä 3187d0667e9cSChris Wilson error->reason = reason; 3188d0667e9cSChris Wilson error->stalled_mask = engine_mask; 3189ce800754SChris Wilson 319036703e79SChris Wilson /* Signal that locked waiters should reset the GPU */ 3191d0667e9cSChris Wilson smp_mb__before_atomic(); 3192ce800754SChris Wilson set_bit(I915_RESET_HANDOFF, &error->flags); 3193ce800754SChris Wilson wake_up_all(&error->wait_queue); 31948c185ecaSChris Wilson 319536703e79SChris Wilson /* Wait for anyone holding the lock to wakeup, without 319636703e79SChris Wilson * blocking indefinitely on struct_mutex. 
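 * We poll: either mutex_trylock() succeeds and we perform the reset
 * here, or a waiter already holding struct_mutex sees the
 * I915_RESET_HANDOFF flag and resets the GPU itself; the loop ends
 * once that flag has been cleared.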
319717e1df07SDaniel Vetter */ 319836703e79SChris Wilson do { 3199780f262aSChris Wilson if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3200d0667e9cSChris Wilson i915_reset(dev_priv, engine_mask, reason); 3201221fe799SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 3202780f262aSChris Wilson } 3203ce800754SChris Wilson } while (wait_on_bit_timeout(&error->flags, 32048c185ecaSChris Wilson I915_RESET_HANDOFF, 3205780f262aSChris Wilson TASK_UNINTERRUPTIBLE, 320636703e79SChris Wilson 1)); 3207f69061beSDaniel Vetter 3208d0667e9cSChris Wilson error->stalled_mask = 0; 3209ce800754SChris Wilson error->reason = NULL; 3210ce800754SChris Wilson 3211c033666aSChris Wilson intel_finish_reset(dev_priv); 321236703e79SChris Wilson } 3213f454c694SImre Deak 3214ce800754SChris Wilson if (!test_bit(I915_WEDGED, &error->flags)) 3215ce800754SChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3216f316a42cSBen Gamari } 32178a905236SJesse Barnes 3218*09605548SLionel Landwerlin void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3219c0e09200SDave Airlie { 3220eaa14c24SChris Wilson u32 eir; 322163eeaf38SJesse Barnes 3222eaa14c24SChris Wilson if (!IS_GEN2(dev_priv)) 3223eaa14c24SChris Wilson I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 322463eeaf38SJesse Barnes 3225eaa14c24SChris Wilson if (INTEL_GEN(dev_priv) < 4) 3226eaa14c24SChris Wilson I915_WRITE(IPEIR, I915_READ(IPEIR)); 3227eaa14c24SChris Wilson else 3228eaa14c24SChris Wilson I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 32298a905236SJesse Barnes 3230eaa14c24SChris Wilson I915_WRITE(EIR, I915_READ(EIR)); 323163eeaf38SJesse Barnes eir = I915_READ(EIR); 323263eeaf38SJesse Barnes if (eir) { 323363eeaf38SJesse Barnes /* 323463eeaf38SJesse Barnes * some errors might have become stuck, 323563eeaf38SJesse Barnes * mask them. 
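 * Folding the stuck bits into EMR masks them so they stop raising
 * further master error interrupts; the IIR write below then acks the
 * error interrupt that is already latched.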
323663eeaf38SJesse Barnes */ 3237eaa14c24SChris Wilson DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 323863eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 323978c357ddSVille Syrjälä I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 324063eeaf38SJesse Barnes } 3241*09605548SLionel Landwerlin 3242*09605548SLionel Landwerlin if (INTEL_GEN(dev_priv) >= 8) { 3243*09605548SLionel Landwerlin I915_WRITE(GEN8_RING_FAULT_REG, 3244*09605548SLionel Landwerlin I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); 3245*09605548SLionel Landwerlin POSTING_READ(GEN8_RING_FAULT_REG); 3246*09605548SLionel Landwerlin } else if (INTEL_GEN(dev_priv) >= 6) { 3247*09605548SLionel Landwerlin struct intel_engine_cs *engine; 3248*09605548SLionel Landwerlin enum intel_engine_id id; 3249*09605548SLionel Landwerlin 3250*09605548SLionel Landwerlin for_each_engine(engine, dev_priv, id) { 3251*09605548SLionel Landwerlin I915_WRITE(RING_FAULT_REG(engine), 3252*09605548SLionel Landwerlin I915_READ(RING_FAULT_REG(engine)) & 3253*09605548SLionel Landwerlin ~RING_FAULT_VALID); 3254*09605548SLionel Landwerlin } 3255*09605548SLionel Landwerlin POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); 3256*09605548SLionel Landwerlin } 325735aed2e6SChris Wilson } 325835aed2e6SChris Wilson 325935aed2e6SChris Wilson /** 3260b8d24a06SMika Kuoppala * i915_handle_error - handle a gpu error 326114bb2c11STvrtko Ursulin * @dev_priv: i915 device private 326214b730fcSarun.siluvery@linux.intel.com * @engine_mask: mask representing engines that are hung 3263ce800754SChris Wilson * @flags: control flags 326487c390b6SMichel Thierry * @fmt: Error message format string 326587c390b6SMichel Thierry * 3266aafd8581SJavier Martinez Canillas * Do some basic checking of register state at error time and 326735aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 326835aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 326935aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 327035aed2e6SChris Wilson * of a ring dump etc.). 327135aed2e6SChris Wilson */ 3272c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv, 3273c033666aSChris Wilson u32 engine_mask, 3274ce800754SChris Wilson unsigned long flags, 327558174462SMika Kuoppala const char *fmt, ...) 327635aed2e6SChris Wilson { 3277142bc7d9SMichel Thierry struct intel_engine_cs *engine; 3278142bc7d9SMichel Thierry unsigned int tmp; 327958174462SMika Kuoppala char error_msg[80]; 3280ce800754SChris Wilson char *msg = NULL; 3281ce800754SChris Wilson 3282ce800754SChris Wilson if (fmt) { 3283ce800754SChris Wilson va_list args; 328435aed2e6SChris Wilson 328558174462SMika Kuoppala va_start(args, fmt); 328658174462SMika Kuoppala vscnprintf(error_msg, sizeof(error_msg), fmt, args); 328758174462SMika Kuoppala va_end(args); 328858174462SMika Kuoppala 3289ce800754SChris Wilson msg = error_msg; 3290ce800754SChris Wilson } 3291ce800754SChris Wilson 32921604a86dSChris Wilson /* 32931604a86dSChris Wilson * In most cases it's guaranteed that we get here with an RPM 32941604a86dSChris Wilson * reference held, for example because there is a pending GPU 32951604a86dSChris Wilson * request that won't finish until the reset is done. This 32961604a86dSChris Wilson * isn't the case at least when we get here by doing a 32971604a86dSChris Wilson * simulated reset via debugfs, so get an RPM reference. 
32981604a86dSChris Wilson */ 32991604a86dSChris Wilson intel_runtime_pm_get(dev_priv); 33001604a86dSChris Wilson 3301873d66fbSChris Wilson engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3302ce800754SChris Wilson 3303ce800754SChris Wilson if (flags & I915_ERROR_CAPTURE) { 3304ce800754SChris Wilson i915_capture_error_state(dev_priv, engine_mask, msg); 3305eaa14c24SChris Wilson i915_clear_error_registers(dev_priv); 3306ce800754SChris Wilson } 33078a905236SJesse Barnes 3308142bc7d9SMichel Thierry /* 3309142bc7d9SMichel Thierry * Try engine reset when available. We fall back to full reset if 3310142bc7d9SMichel Thierry * single reset fails. 3311142bc7d9SMichel Thierry */ 3312142bc7d9SMichel Thierry if (intel_has_reset_engine(dev_priv)) { 3313142bc7d9SMichel Thierry for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 33149db529aaSDaniel Vetter BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3315142bc7d9SMichel Thierry if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3316142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3317142bc7d9SMichel Thierry continue; 3318142bc7d9SMichel Thierry 3319ce800754SChris Wilson if (i915_reset_engine(engine, msg) == 0) 3320142bc7d9SMichel Thierry engine_mask &= ~intel_engine_flag(engine); 3321142bc7d9SMichel Thierry 3322142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3323142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3324142bc7d9SMichel Thierry wake_up_bit(&dev_priv->gpu_error.flags, 3325142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id); 3326142bc7d9SMichel Thierry } 3327142bc7d9SMichel Thierry } 3328142bc7d9SMichel Thierry 33298af29b0cSChris Wilson if (!engine_mask) 33301604a86dSChris Wilson goto out; 33318af29b0cSChris Wilson 3332142bc7d9SMichel Thierry /* Full reset needs the mutex, stop any other user trying to do so. */ 3333d5367307SChris Wilson if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3334d5367307SChris Wilson wait_event(dev_priv->gpu_error.reset_queue, 3335d5367307SChris Wilson !test_bit(I915_RESET_BACKOFF, 3336d5367307SChris Wilson &dev_priv->gpu_error.flags)); 33371604a86dSChris Wilson goto out; 3338d5367307SChris Wilson } 3339ba1234d1SBen Gamari 3340142bc7d9SMichel Thierry /* Prevent any other reset-engine attempt. 
*/ 3341142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3342142bc7d9SMichel Thierry while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3343142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3344142bc7d9SMichel Thierry wait_on_bit(&dev_priv->gpu_error.flags, 3345142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id, 3346142bc7d9SMichel Thierry TASK_UNINTERRUPTIBLE); 3347142bc7d9SMichel Thierry } 3348142bc7d9SMichel Thierry 3349d0667e9cSChris Wilson i915_reset_device(dev_priv, engine_mask, msg); 3350d5367307SChris Wilson 3351142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3352142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3353142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3354142bc7d9SMichel Thierry } 3355142bc7d9SMichel Thierry 3356d5367307SChris Wilson clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3357d5367307SChris Wilson wake_up_all(&dev_priv->gpu_error.reset_queue); 33581604a86dSChris Wilson 33591604a86dSChris Wilson out: 33601604a86dSChris Wilson intel_runtime_pm_put(dev_priv); 33618a905236SJesse Barnes } 33628a905236SJesse Barnes 336342f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 336442f52ef8SKeith Packard * we use as a pipe index 336542f52ef8SKeith Packard */ 336686e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 33670a3e67a4SJesse Barnes { 3368fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3369e9d21d7fSKeith Packard unsigned long irqflags; 337071e0ffa5SJesse Barnes 33711ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 337286e83e35SChris Wilson i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 337386e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 337486e83e35SChris Wilson 337586e83e35SChris Wilson return 0; 337686e83e35SChris Wilson } 337786e83e35SChris Wilson 337886e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 337986e83e35SChris Wilson { 338086e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 338186e83e35SChris Wilson unsigned long irqflags; 338286e83e35SChris Wilson 338386e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 33847c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 3385755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 33861ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 33878692d00eSChris Wilson 33880a3e67a4SJesse Barnes return 0; 33890a3e67a4SJesse Barnes } 33900a3e67a4SJesse Barnes 339188e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3392f796cf8fSJesse Barnes { 3393fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3394f796cf8fSJesse Barnes unsigned long irqflags; 339555b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 339686e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3397f796cf8fSJesse Barnes 3398f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3399fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, bit); 3400b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3401b1f14ad0SJesse Barnes 34022e8bf223SDhinakaran Pandiyan /* Even though there is no DMC, frame counter can get stuck when 34032e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated. 
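 * drm_vblank_restore() re-estimates the vblanks that elapsed while the
 * interrupt was off, so the software vblank counter stays consistent
 * despite the stuck hardware frame counter.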
34042e8bf223SDhinakaran Pandiyan */ 34052e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34062e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34072e8bf223SDhinakaran Pandiyan 3408b1f14ad0SJesse Barnes return 0; 3409b1f14ad0SJesse Barnes } 3410b1f14ad0SJesse Barnes 341188e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3412abd58f01SBen Widawsky { 3413fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3414abd58f01SBen Widawsky unsigned long irqflags; 3415abd58f01SBen Widawsky 3416abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3417013d3752SVille Syrjälä bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3418abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3419013d3752SVille Syrjälä 34202e8bf223SDhinakaran Pandiyan /* Even if there is no DMC, frame counter can get stuck when 34212e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated, so check only for PSR. 34222e8bf223SDhinakaran Pandiyan */ 34232e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34242e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34252e8bf223SDhinakaran Pandiyan 3426abd58f01SBen Widawsky return 0; 3427abd58f01SBen Widawsky } 3428abd58f01SBen Widawsky 342942f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 343042f52ef8SKeith Packard * we use as a pipe index 343142f52ef8SKeith Packard */ 343286e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 343386e83e35SChris Wilson { 343486e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 343586e83e35SChris Wilson unsigned long irqflags; 343686e83e35SChris Wilson 343786e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 343886e83e35SChris Wilson i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 343986e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 344086e83e35SChris Wilson } 344186e83e35SChris Wilson 344286e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 34430a3e67a4SJesse Barnes { 3444fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3445e9d21d7fSKeith Packard unsigned long irqflags; 34460a3e67a4SJesse Barnes 34471ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34487c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 3449755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 34501ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 34510a3e67a4SJesse Barnes } 34520a3e67a4SJesse Barnes 345388e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3454f796cf8fSJesse Barnes { 3455fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3456f796cf8fSJesse Barnes unsigned long irqflags; 345755b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
345886e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3459f796cf8fSJesse Barnes 3460f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3461fbdedaeaSVille Syrjälä ilk_disable_display_irq(dev_priv, bit); 3462b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3463b1f14ad0SJesse Barnes } 3464b1f14ad0SJesse Barnes 346588e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3466abd58f01SBen Widawsky { 3467fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3468abd58f01SBen Widawsky unsigned long irqflags; 3469abd58f01SBen Widawsky 3470abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3471013d3752SVille Syrjälä bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3472abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3473abd58f01SBen Widawsky } 3474abd58f01SBen Widawsky 3475b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv) 347691738a95SPaulo Zanoni { 34776e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 347891738a95SPaulo Zanoni return; 347991738a95SPaulo Zanoni 34803488d4ebSVille Syrjälä GEN3_IRQ_RESET(SDE); 3481105b122eSPaulo Zanoni 34826e266956STvrtko Ursulin if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3483105b122eSPaulo Zanoni I915_WRITE(SERR_INT, 0xffffffff); 3484622364b6SPaulo Zanoni } 3485105b122eSPaulo Zanoni 348691738a95SPaulo Zanoni /* 3487622364b6SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed PCH 3488622364b6SPaulo Zanoni * interrupts. Hence we can't update it after the interrupt handler is enabled - 3489622364b6SPaulo Zanoni * instead we unconditionally enable all PCH interrupt sources here, but then 3490622364b6SPaulo Zanoni * only unmask them as needed with SDEIMR. 3491622364b6SPaulo Zanoni * 3492622364b6SPaulo Zanoni * This function needs to be called before interrupts are enabled. 
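 * The resulting ordering is: ibx_irq_reset() clears everything, this
 * pre-postinstall hook writes SDEIER to all ones, and
 * ibx_irq_postinstall() then unmasks only the wanted sources via SDEIMR.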
349391738a95SPaulo Zanoni */ 3494622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev) 3495622364b6SPaulo Zanoni { 3496fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3497622364b6SPaulo Zanoni 34986e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 3499622364b6SPaulo Zanoni return; 3500622364b6SPaulo Zanoni 3501622364b6SPaulo Zanoni WARN_ON(I915_READ(SDEIER) != 0); 350291738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 350391738a95SPaulo Zanoni POSTING_READ(SDEIER); 350491738a95SPaulo Zanoni } 350591738a95SPaulo Zanoni 3506b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3507d18ea1b5SDaniel Vetter { 35083488d4ebSVille Syrjälä GEN3_IRQ_RESET(GT); 3509b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 35103488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN6_PM); 3511d18ea1b5SDaniel Vetter } 3512d18ea1b5SDaniel Vetter 351370591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 351470591a41SVille Syrjälä { 351571b8b41dSVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 351671b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 351771b8b41dSVille Syrjälä else 351871b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 351971b8b41dSVille Syrjälä 3520ad22d106SVille Syrjälä i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 352170591a41SVille Syrjälä I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 352270591a41SVille Syrjälä 352344d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 352470591a41SVille Syrjälä 35253488d4ebSVille Syrjälä GEN3_IRQ_RESET(VLV_); 35268bd099a7SChris Wilson dev_priv->irq_mask = ~0u; 352770591a41SVille Syrjälä } 352870591a41SVille Syrjälä 35298bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 35308bb61306SVille Syrjälä { 35318bb61306SVille Syrjälä u32 pipestat_mask; 35329ab981f2SVille Syrjälä u32 enable_mask; 35338bb61306SVille Syrjälä enum pipe pipe; 35348bb61306SVille Syrjälä 3535842ebf7aSVille Syrjälä pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 35368bb61306SVille Syrjälä 35378bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 35388bb61306SVille Syrjälä for_each_pipe(dev_priv, pipe) 35398bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 35408bb61306SVille Syrjälä 35419ab981f2SVille Syrjälä enable_mask = I915_DISPLAY_PORT_INTERRUPT | 35428bb61306SVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3543ebf5f921SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3544ebf5f921SVille Syrjälä I915_LPE_PIPE_A_INTERRUPT | 3545ebf5f921SVille Syrjälä I915_LPE_PIPE_B_INTERRUPT; 3546ebf5f921SVille Syrjälä 35478bb61306SVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 3548ebf5f921SVille Syrjälä enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3549ebf5f921SVille Syrjälä I915_LPE_PIPE_C_INTERRUPT; 35506b7eafc1SVille Syrjälä 35518bd099a7SChris Wilson WARN_ON(dev_priv->irq_mask != ~0u); 35526b7eafc1SVille Syrjälä 35539ab981f2SVille Syrjälä dev_priv->irq_mask = ~enable_mask; 35548bb61306SVille Syrjälä 35553488d4ebSVille Syrjälä GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 35568bb61306SVille Syrjälä } 35578bb61306SVille Syrjälä 35588bb61306SVille Syrjälä /* drm_dma.h hooks 35598bb61306SVille Syrjälä */ 35608bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev) 35618bb61306SVille Syrjälä { 3562fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 35638bb61306SVille 
Syrjälä 3564d420a50cSVille Syrjälä if (IS_GEN5(dev_priv)) 35658bb61306SVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 35668bb61306SVille Syrjälä 35673488d4ebSVille Syrjälä GEN3_IRQ_RESET(DE); 35685db94019STvrtko Ursulin if (IS_GEN7(dev_priv)) 35698bb61306SVille Syrjälä I915_WRITE(GEN7_ERR_INT, 0xffffffff); 35708bb61306SVille Syrjälä 3571fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 3572fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3573fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3574fc340442SDaniel Vetter } 3575fc340442SDaniel Vetter 3576b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 35778bb61306SVille Syrjälä 3578b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 35798bb61306SVille Syrjälä } 35808bb61306SVille Syrjälä 35816bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev) 35827e231dbeSJesse Barnes { 3583fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 35847e231dbeSJesse Barnes 358534c7b8a7SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 358634c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 358734c7b8a7SVille Syrjälä 3588b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 35897e231dbeSJesse Barnes 3590ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 35919918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 359270591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3593ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 35947e231dbeSJesse Barnes } 35957e231dbeSJesse Barnes 3596d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3597d6e3cca3SDaniel Vetter { 3598d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 0); 3599d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 1); 3600d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 2); 3601d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 3); 3602d6e3cca3SDaniel Vetter } 3603d6e3cca3SDaniel Vetter 3604823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev) 3605abd58f01SBen Widawsky { 3606fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3607abd58f01SBen Widawsky int pipe; 3608abd58f01SBen Widawsky 3609abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, 0); 3610abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 3611abd58f01SBen Widawsky 3612d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 3613abd58f01SBen Widawsky 3614e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3615e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3616e04f7eceSVille Syrjälä 3617055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 3618f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 3619813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 3620f86f3fb0SPaulo Zanoni GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3621abd58f01SBen Widawsky 36223488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_PORT_); 36233488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_MISC_); 36243488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 3625abd58f01SBen Widawsky 36266e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 3627b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 3628abd58f01SBen Widawsky } 3629abd58f01SBen Widawsky 363051951ae7SMika Kuoppala static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 363151951ae7SMika Kuoppala { 363251951ae7SMika Kuoppala /* Disable RCS, BCS, VCS and VECS class engines. 
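 * Gen11 groups the enable bits per engine class: one register covers
 * the render/copy classes and another the video/video-enhance classes;
 * the writes that follow also set every per-engine-pair mask register
 * back to fully masked.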
*/ 363351951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 363451951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 363551951ae7SMika Kuoppala 363651951ae7SMika Kuoppala /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 363751951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 363851951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 363951951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 364051951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 364151951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3642d02b98b8SOscar Mateo 3643d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3644d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 364551951ae7SMika Kuoppala } 364651951ae7SMika Kuoppala 364751951ae7SMika Kuoppala static void gen11_irq_reset(struct drm_device *dev) 364851951ae7SMika Kuoppala { 364951951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 365051951ae7SMika Kuoppala int pipe; 365151951ae7SMika Kuoppala 365251951ae7SMika Kuoppala I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 365351951ae7SMika Kuoppala POSTING_READ(GEN11_GFX_MSTR_IRQ); 365451951ae7SMika Kuoppala 365551951ae7SMika Kuoppala gen11_gt_irq_reset(dev_priv); 365651951ae7SMika Kuoppala 365751951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 365851951ae7SMika Kuoppala 365951951ae7SMika Kuoppala for_each_pipe(dev_priv, pipe) 366051951ae7SMika Kuoppala if (intel_display_power_is_enabled(dev_priv, 366151951ae7SMika Kuoppala POWER_DOMAIN_PIPE(pipe))) 366251951ae7SMika Kuoppala GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 366351951ae7SMika Kuoppala 366451951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_PORT_); 366551951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_MISC_); 3666121e758eSDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_DE_HPD_); 3667df0d28c1SDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_GU_MISC_); 366851951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_PCU_); 366931604222SAnusha Srivatsa 367031604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 367131604222SAnusha Srivatsa GEN3_IRQ_RESET(SDE); 367251951ae7SMika Kuoppala } 367351951ae7SMika Kuoppala 36744c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3675001bd2cbSImre Deak u8 pipe_mask) 3676d49bdb0eSPaulo Zanoni { 36771180e206SPaulo Zanoni uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 36786831f3e3SVille Syrjälä enum pipe pipe; 3679d49bdb0eSPaulo Zanoni 368013321786SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 36819dfe2e3aSImre Deak 36829dfe2e3aSImre Deak if (!intel_irqs_enabled(dev_priv)) { 36839dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 36849dfe2e3aSImre Deak return; 36859dfe2e3aSImre Deak } 36869dfe2e3aSImre Deak 36876831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 36886831f3e3SVille Syrjälä GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 36896831f3e3SVille Syrjälä dev_priv->de_irq_mask[pipe], 36906831f3e3SVille Syrjälä ~dev_priv->de_irq_mask[pipe] | extra_ier); 36919dfe2e3aSImre Deak 369213321786SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 3693d49bdb0eSPaulo Zanoni } 3694d49bdb0eSPaulo Zanoni 3695aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3696001bd2cbSImre Deak u8 pipe_mask) 3697aae8ba84SVille Syrjälä { 36986831f3e3SVille Syrjälä enum pipe pipe; 36996831f3e3SVille Syrjälä 3700aae8ba84SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37019dfe2e3aSImre Deak 37029dfe2e3aSImre Deak if 
(!intel_irqs_enabled(dev_priv)) { 37039dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 37049dfe2e3aSImre Deak return; 37059dfe2e3aSImre Deak } 37069dfe2e3aSImre Deak 37076831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 37086831f3e3SVille Syrjälä GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 37099dfe2e3aSImre Deak 3710aae8ba84SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 3711aae8ba84SVille Syrjälä 3712aae8ba84SVille Syrjälä /* make sure we're done processing display irqs */ 371391c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 3714aae8ba84SVille Syrjälä } 3715aae8ba84SVille Syrjälä 37166bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev) 371743f328d7SVille Syrjälä { 3718fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 371943f328d7SVille Syrjälä 372043f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 372143f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 372243f328d7SVille Syrjälä 3723d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 372443f328d7SVille Syrjälä 37253488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 372643f328d7SVille Syrjälä 3727ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37289918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 372970591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3730ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 373143f328d7SVille Syrjälä } 373243f328d7SVille Syrjälä 373391d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 373487a02106SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 373587a02106SVille Syrjälä { 373687a02106SVille Syrjälä struct intel_encoder *encoder; 373787a02106SVille Syrjälä u32 enabled_irqs = 0; 373887a02106SVille Syrjälä 373991c8a326SChris Wilson for_each_intel_encoder(&dev_priv->drm, encoder) 374087a02106SVille Syrjälä if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 374187a02106SVille Syrjälä enabled_irqs |= hpd[encoder->hpd_pin]; 374287a02106SVille Syrjälä 374387a02106SVille Syrjälä return enabled_irqs; 374487a02106SVille Syrjälä } 374587a02106SVille Syrjälä 37461a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 37471a56b1a2SImre Deak { 37481a56b1a2SImre Deak u32 hotplug; 37491a56b1a2SImre Deak 37501a56b1a2SImre Deak /* 37511a56b1a2SImre Deak * Enable digital hotplug on the PCH, and configure the DP short pulse 37521a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec). 37531a56b1a2SImre Deak * The pulse duration bits are reserved on LPT+. 37541a56b1a2SImre Deak */ 37551a56b1a2SImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 37561a56b1a2SImre Deak hotplug &= ~(PORTB_PULSE_DURATION_MASK | 37571a56b1a2SImre Deak PORTC_PULSE_DURATION_MASK | 37581a56b1a2SImre Deak PORTD_PULSE_DURATION_MASK); 37591a56b1a2SImre Deak hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 37601a56b1a2SImre Deak hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 37611a56b1a2SImre Deak hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 37621a56b1a2SImre Deak /* 37631a56b1a2SImre Deak * When CPU and PCH are on the same package, port A 37641a56b1a2SImre Deak * HPD must be enabled in both north and south. 
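 * The HAS_PCH_LPT_LP() check below covers that case on the south (PCH)
 * side; the matching north (CPU) side enable is done in
 * ilk_hpd_detection_setup().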
37651a56b1a2SImre Deak */ 37661a56b1a2SImre Deak if (HAS_PCH_LPT_LP(dev_priv)) 37671a56b1a2SImre Deak hotplug |= PORTA_HOTPLUG_ENABLE; 37681a56b1a2SImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 37691a56b1a2SImre Deak } 37701a56b1a2SImre Deak 377191d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 377282a28bcfSDaniel Vetter { 37731a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 377482a28bcfSDaniel Vetter 377591d14251STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) { 3776fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 377791d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 377882a28bcfSDaniel Vetter } else { 3779fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 378091d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 378182a28bcfSDaniel Vetter } 378282a28bcfSDaniel Vetter 3783fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 378482a28bcfSDaniel Vetter 37851a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 37866dbf30ceSVille Syrjälä } 378726951cafSXiong Zhang 378831604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 378931604222SAnusha Srivatsa { 379031604222SAnusha Srivatsa u32 hotplug; 379131604222SAnusha Srivatsa 379231604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_DDI); 379331604222SAnusha Srivatsa hotplug |= ICP_DDIA_HPD_ENABLE | 379431604222SAnusha Srivatsa ICP_DDIB_HPD_ENABLE; 379531604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 379631604222SAnusha Srivatsa 379731604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_TC); 379831604222SAnusha Srivatsa hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 379931604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC2) | 380031604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC3) | 380131604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC4); 380231604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 380331604222SAnusha Srivatsa } 380431604222SAnusha Srivatsa 380531604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 380631604222SAnusha Srivatsa { 380731604222SAnusha Srivatsa u32 hotplug_irqs, enabled_irqs; 380831604222SAnusha Srivatsa 380931604222SAnusha Srivatsa hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 381031604222SAnusha Srivatsa enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 381131604222SAnusha Srivatsa 381231604222SAnusha Srivatsa ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 381331604222SAnusha Srivatsa 381431604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 381531604222SAnusha Srivatsa } 381631604222SAnusha Srivatsa 3817121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3818121e758eSDhinakaran Pandiyan { 3819121e758eSDhinakaran Pandiyan u32 hotplug; 3820121e758eSDhinakaran Pandiyan 3821121e758eSDhinakaran Pandiyan hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3822121e758eSDhinakaran Pandiyan hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3823121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3824121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3825121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3826121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3827b796b971SDhinakaran Pandiyan 3828b796b971SDhinakaran Pandiyan hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3829b796b971SDhinakaran Pandiyan hotplug |= 
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3830b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3831b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3832b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3833b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3834121e758eSDhinakaran Pandiyan } 3835121e758eSDhinakaran Pandiyan 3836121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3837121e758eSDhinakaran Pandiyan { 3838121e758eSDhinakaran Pandiyan u32 hotplug_irqs, enabled_irqs; 3839121e758eSDhinakaran Pandiyan u32 val; 3840121e758eSDhinakaran Pandiyan 3841b796b971SDhinakaran Pandiyan enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3842b796b971SDhinakaran Pandiyan hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3843121e758eSDhinakaran Pandiyan 3844121e758eSDhinakaran Pandiyan val = I915_READ(GEN11_DE_HPD_IMR); 3845121e758eSDhinakaran Pandiyan val &= ~hotplug_irqs; 3846121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IMR, val); 3847121e758eSDhinakaran Pandiyan POSTING_READ(GEN11_DE_HPD_IMR); 3848121e758eSDhinakaran Pandiyan 3849121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 385031604222SAnusha Srivatsa 385131604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 385231604222SAnusha Srivatsa icp_hpd_irq_setup(dev_priv); 3853121e758eSDhinakaran Pandiyan } 3854121e758eSDhinakaran Pandiyan 38552a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 38562a57d9ccSImre Deak { 38573b92e263SRodrigo Vivi u32 val, hotplug; 38583b92e263SRodrigo Vivi 38593b92e263SRodrigo Vivi /* Display WA #1179 WaHardHangonHotPlug: cnp */ 38603b92e263SRodrigo Vivi if (HAS_PCH_CNP(dev_priv)) { 38613b92e263SRodrigo Vivi val = I915_READ(SOUTH_CHICKEN1); 38623b92e263SRodrigo Vivi val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 38633b92e263SRodrigo Vivi val |= CHASSIS_CLK_REQ_DURATION(0xf); 38643b92e263SRodrigo Vivi I915_WRITE(SOUTH_CHICKEN1, val); 38653b92e263SRodrigo Vivi } 38662a57d9ccSImre Deak 38672a57d9ccSImre Deak /* Enable digital hotplug on the PCH */ 38682a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 38692a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 38702a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 38712a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE | 38722a57d9ccSImre Deak PORTD_HOTPLUG_ENABLE; 38732a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 38742a57d9ccSImre Deak 38752a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG2); 38762a57d9ccSImre Deak hotplug |= PORTE_HOTPLUG_ENABLE; 38772a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 38782a57d9ccSImre Deak } 38792a57d9ccSImre Deak 388091d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 38816dbf30ceSVille Syrjälä { 38822a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 38836dbf30ceSVille Syrjälä 38846dbf30ceSVille Syrjälä hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 388591d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 38866dbf30ceSVille Syrjälä 38876dbf30ceSVille Syrjälä ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 38886dbf30ceSVille Syrjälä 38892a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 389026951cafSXiong Zhang } 38917fe0b973SKeith Packard 38921a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 38931a56b1a2SImre Deak { 38941a56b1a2SImre Deak u32 hotplug; 38951a56b1a2SImre Deak 38961a56b1a2SImre Deak /* 38971a56b1a2SImre 
Deak * Enable digital hotplug on the CPU, and configure the DP short pulse 38981a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec) 38991a56b1a2SImre Deak * The pulse duration bits are reserved on HSW+. 39001a56b1a2SImre Deak */ 39011a56b1a2SImre Deak hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 39021a56b1a2SImre Deak hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 39031a56b1a2SImre Deak hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 39041a56b1a2SImre Deak DIGITAL_PORTA_PULSE_DURATION_2ms; 39051a56b1a2SImre Deak I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 39061a56b1a2SImre Deak } 39071a56b1a2SImre Deak 390891d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3909e4ce95aaSVille Syrjälä { 39101a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 3911e4ce95aaSVille Syrjälä 391291d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 8) { 39133a3b3c7dSVille Syrjälä hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 391491d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 39153a3b3c7dSVille Syrjälä 39163a3b3c7dSVille Syrjälä bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 391791d14251STvrtko Ursulin } else if (INTEL_GEN(dev_priv) >= 7) { 391823bb4cb5SVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 391991d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 39203a3b3c7dSVille Syrjälä 39213a3b3c7dSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 392223bb4cb5SVille Syrjälä } else { 3923e4ce95aaSVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG; 392491d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3925e4ce95aaSVille Syrjälä 3926e4ce95aaSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 39273a3b3c7dSVille Syrjälä } 3928e4ce95aaSVille Syrjälä 39291a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 3930e4ce95aaSVille Syrjälä 393191d14251STvrtko Ursulin ibx_hpd_irq_setup(dev_priv); 3932e4ce95aaSVille Syrjälä } 3933e4ce95aaSVille Syrjälä 39342a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 39352a57d9ccSImre Deak u32 enabled_irqs) 3936e0a20ad7SShashank Sharma { 39372a57d9ccSImre Deak u32 hotplug; 3938e0a20ad7SShashank Sharma 3939a52bb15bSVille Syrjälä hotplug = I915_READ(PCH_PORT_HOTPLUG); 39402a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 39412a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 39422a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE; 3943d252bf68SShubhangi Shrivastava 3944d252bf68SShubhangi Shrivastava DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3945d252bf68SShubhangi Shrivastava hotplug, enabled_irqs); 3946d252bf68SShubhangi Shrivastava hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3947d252bf68SShubhangi Shrivastava 3948d252bf68SShubhangi Shrivastava /* 3949d252bf68SShubhangi Shrivastava * For BXT invert bit has to be set based on AOB design 3950d252bf68SShubhangi Shrivastava * for HPD detection logic, update it based on VBT fields. 
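 * Concretely: each DDI whose HPD irq is enabled and which the VBT marks
 * as inverted gets the corresponding BXT_DDIx_HPD_INVERT bit set below.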
3951d252bf68SShubhangi Shrivastava */ 3952d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3953d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3954d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIA_HPD_INVERT; 3955d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3956d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3957d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIB_HPD_INVERT; 3958d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3959d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3960d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIC_HPD_INVERT; 3961d252bf68SShubhangi Shrivastava 3962a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3963e0a20ad7SShashank Sharma } 3964e0a20ad7SShashank Sharma 39652a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 39662a57d9ccSImre Deak { 39672a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 39682a57d9ccSImre Deak } 39692a57d9ccSImre Deak 39702a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 39712a57d9ccSImre Deak { 39722a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 39732a57d9ccSImre Deak 39742a57d9ccSImre Deak enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 39752a57d9ccSImre Deak hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 39762a57d9ccSImre Deak 39772a57d9ccSImre Deak bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 39782a57d9ccSImre Deak 39792a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 39802a57d9ccSImre Deak } 39812a57d9ccSImre Deak 3982d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 3983d46da437SPaulo Zanoni { 3984fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 398582a28bcfSDaniel Vetter u32 mask; 3986d46da437SPaulo Zanoni 39876e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 3988692a04cfSDaniel Vetter return; 3989692a04cfSDaniel Vetter 39906e266956STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) 39915c673b60SDaniel Vetter mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 39924ebc6509SDhinakaran Pandiyan else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 39935c673b60SDaniel Vetter mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 39944ebc6509SDhinakaran Pandiyan else 39954ebc6509SDhinakaran Pandiyan mask = SDE_GMBUS_CPT; 39968664281bSPaulo Zanoni 39973488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3998d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 39992a57d9ccSImre Deak 40002a57d9ccSImre Deak if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 40012a57d9ccSImre Deak HAS_PCH_LPT(dev_priv)) 40021a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 40032a57d9ccSImre Deak else 40042a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 4005d46da437SPaulo Zanoni } 4006d46da437SPaulo Zanoni 40070a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 40080a9a8c91SDaniel Vetter { 4009fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40100a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 40110a9a8c91SDaniel Vetter 40120a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 40130a9a8c91SDaniel Vetter 40140a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 40153c9192bcSTvrtko Ursulin if (HAS_L3_DPF(dev_priv)) { 40160a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. 
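 * That is, the parity bits are left out of gt_irq_mask so they stay
 * unmasked in GTIMR, and they are added to the enable bits right after.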
*/ 4017772c2a51STvrtko Ursulin dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4018772c2a51STvrtko Ursulin gt_irqs |= GT_PARITY_ERROR(dev_priv); 40190a9a8c91SDaniel Vetter } 40200a9a8c91SDaniel Vetter 40210a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 40225db94019STvrtko Ursulin if (IS_GEN5(dev_priv)) { 4023f8973c21SChris Wilson gt_irqs |= ILK_BSD_USER_INTERRUPT; 40240a9a8c91SDaniel Vetter } else { 40250a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 40260a9a8c91SDaniel Vetter } 40270a9a8c91SDaniel Vetter 40283488d4ebSVille Syrjälä GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 40290a9a8c91SDaniel Vetter 4030b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 403178e68d36SImre Deak /* 403278e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS 403378e68d36SImre Deak * itself is enabled/disabled. 403478e68d36SImre Deak */ 4035f4e9af4fSAkash Goel if (HAS_VEBOX(dev_priv)) { 40360a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4037f4e9af4fSAkash Goel dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4038f4e9af4fSAkash Goel } 40390a9a8c91SDaniel Vetter 4040f4e9af4fSAkash Goel dev_priv->pm_imr = 0xffffffff; 40413488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 40420a9a8c91SDaniel Vetter } 40430a9a8c91SDaniel Vetter } 40440a9a8c91SDaniel Vetter 4045f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 4046036a4a7dSZhenyu Wang { 4047fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40488e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 40498e76f8dcSPaulo Zanoni 4050b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) { 40518e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4052842ebf7aSVille Syrjälä DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 40538e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 405423bb4cb5SVille Syrjälä DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 405523bb4cb5SVille Syrjälä DE_DP_A_HOTPLUG_IVB); 40568e76f8dcSPaulo Zanoni } else { 40578e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4058842ebf7aSVille Syrjälä DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4059842ebf7aSVille Syrjälä DE_PIPEA_CRC_DONE | DE_POISON); 4060e4ce95aaSVille Syrjälä extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4061e4ce95aaSVille Syrjälä DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4062e4ce95aaSVille Syrjälä DE_DP_A_HOTPLUG); 40638e76f8dcSPaulo Zanoni } 4064036a4a7dSZhenyu Wang 4065fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 4066fc340442SDaniel Vetter gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 40671aeb1b5fSDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4068fc340442SDaniel Vetter display_mask |= DE_EDP_PSR_INT_HSW; 4069fc340442SDaniel Vetter } 4070fc340442SDaniel Vetter 40711ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 4072036a4a7dSZhenyu Wang 4073622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4074622364b6SPaulo Zanoni 40753488d4ebSVille Syrjälä GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4076036a4a7dSZhenyu Wang 40770a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 4078036a4a7dSZhenyu Wang 40791a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 40801a56b1a2SImre Deak 4081d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 40827fe0b973SKeith Packard 408350a0bc90STvrtko Ursulin if (IS_IRONLAKE_M(dev_priv)) { 40846005ce42SDaniel Vetter /* Enable PCU event interrupts 
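 * (On Ironlake-M the PCU event interrupt feeds the GPU frequency/RPS
 * change handling elsewhere in this file.)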
40856005ce42SDaniel Vetter * 40866005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 40874bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 40884bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 4089d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4090fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4091d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4092f97108d1SJesse Barnes } 4093f97108d1SJesse Barnes 4094036a4a7dSZhenyu Wang return 0; 4095036a4a7dSZhenyu Wang } 4096036a4a7dSZhenyu Wang 4097f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4098f8b79e58SImre Deak { 409967520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4100f8b79e58SImre Deak 4101f8b79e58SImre Deak if (dev_priv->display_irqs_enabled) 4102f8b79e58SImre Deak return; 4103f8b79e58SImre Deak 4104f8b79e58SImre Deak dev_priv->display_irqs_enabled = true; 4105f8b79e58SImre Deak 4106d6c69803SVille Syrjälä if (intel_irqs_enabled(dev_priv)) { 4107d6c69803SVille Syrjälä vlv_display_irq_reset(dev_priv); 4108ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4109f8b79e58SImre Deak } 4110d6c69803SVille Syrjälä } 4111f8b79e58SImre Deak 4112f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4113f8b79e58SImre Deak { 411467520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4115f8b79e58SImre Deak 4116f8b79e58SImre Deak if (!dev_priv->display_irqs_enabled) 4117f8b79e58SImre Deak return; 4118f8b79e58SImre Deak 4119f8b79e58SImre Deak dev_priv->display_irqs_enabled = false; 4120f8b79e58SImre Deak 4121950eabafSImre Deak if (intel_irqs_enabled(dev_priv)) 4122ad22d106SVille Syrjälä vlv_display_irq_reset(dev_priv); 4123f8b79e58SImre Deak } 4124f8b79e58SImre Deak 41250e6c9a9eSVille Syrjälä 41260e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev) 41270e6c9a9eSVille Syrjälä { 4128fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 41290e6c9a9eSVille Syrjälä 41300a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 41317e231dbeSJesse Barnes 4132ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 41339918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4134ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4135ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4136ad22d106SVille Syrjälä 41377e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 413834c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 413920afbda2SDaniel Vetter 414020afbda2SDaniel Vetter return 0; 414120afbda2SDaniel Vetter } 414220afbda2SDaniel Vetter 4143abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4144abd58f01SBen Widawsky { 4145abd58f01SBen Widawsky /* These are interrupts we'll toggle with the ring mask register */ 4146abd58f01SBen Widawsky uint32_t gt_interrupts[] = { 4147abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 414873d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 414973d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 415073d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 4151abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 415273d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 415373d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << 
GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
	};

	if (HAS_L3_DPF(dev_priv))
		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	dev_priv->pm_ier = 0x0;
	dev_priv->pm_imr = ~dev_priv->pm_ier;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (INTEL_GEN(dev_priv) >= 9) {
		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
				  GEN9_AUX_CHANNEL_D;
		if (IS_GEN9_LP(dev_priv))
			de_port_masked |= BXT_DE_PORT_GMBUS;
	} else {
		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
	}

	if (INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= ICL_AUX_CHANNEL_E;

	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		de_port_masked |= CNL_AUX_CHANNEL_F;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	de_port_enables = de_port_masked;
	if (IS_GEN9_LP(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;

	gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
	intel_psr_irq_control(dev_priv, dev_priv->psr.debug);

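	/*
	 * Only pipes whose power well is currently enabled are programmed
	 * here; the remaining pipes are initialised from
	 * gen8_irq_power_well_post_enable() once their power well comes up.
	 */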
42150a195c02SMika Kahola for_each_pipe(dev_priv, pipe) { 42160a195c02SMika Kahola dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4217abd58f01SBen Widawsky 4218f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 4219813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 4220813bde43SPaulo Zanoni GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 4221813bde43SPaulo Zanoni dev_priv->de_irq_mask[pipe], 422235079899SPaulo Zanoni de_pipe_enables); 42230a195c02SMika Kahola } 4224abd58f01SBen Widawsky 42253488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 42263488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 42272a57d9ccSImre Deak 4228121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11) { 4229121e758eSDhinakaran Pandiyan u32 de_hpd_masked = 0; 4230b796b971SDhinakaran Pandiyan u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4231b796b971SDhinakaran Pandiyan GEN11_DE_TBT_HOTPLUG_MASK; 4232121e758eSDhinakaran Pandiyan 4233121e758eSDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); 4234121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 4235121e758eSDhinakaran Pandiyan } else if (IS_GEN9_LP(dev_priv)) { 42362a57d9ccSImre Deak bxt_hpd_detection_setup(dev_priv); 4237121e758eSDhinakaran Pandiyan } else if (IS_BROADWELL(dev_priv)) { 42381a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 4239abd58f01SBen Widawsky } 4240121e758eSDhinakaran Pandiyan } 4241abd58f01SBen Widawsky 4242abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev) 4243abd58f01SBen Widawsky { 4244fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4245abd58f01SBen Widawsky 42466e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4247622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4248622364b6SPaulo Zanoni 4249abd58f01SBen Widawsky gen8_gt_irq_postinstall(dev_priv); 4250abd58f01SBen Widawsky gen8_de_irq_postinstall(dev_priv); 4251abd58f01SBen Widawsky 42526e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4253abd58f01SBen Widawsky ibx_irq_postinstall(dev); 4254abd58f01SBen Widawsky 4255e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4256abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 4257abd58f01SBen Widawsky 4258abd58f01SBen Widawsky return 0; 4259abd58f01SBen Widawsky } 4260abd58f01SBen Widawsky 426151951ae7SMika Kuoppala static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 426251951ae7SMika Kuoppala { 426351951ae7SMika Kuoppala const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 426451951ae7SMika Kuoppala 426551951ae7SMika Kuoppala BUILD_BUG_ON(irqs & 0xffff0000); 426651951ae7SMika Kuoppala 426751951ae7SMika Kuoppala /* Enable RCS, BCS, VCS and VECS class interrupts. */ 426851951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 426951951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 427051951ae7SMika Kuoppala 427151951ae7SMika Kuoppala /* Unmask irqs on RCS, BCS, VCS and VECS engines. 
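 * Each mask register carries two engines, one per 16-bit half, which is
 * why the single-engine registers unmask only one half (irqs << 16)
 * while the shared ones unmask both halves (irqs | irqs << 16), and why
 * the BUILD_BUG_ON above insists the event bits fit in 16 bits.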
*/ 427251951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 427351951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 427451951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 427551951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 427651951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 427751951ae7SMika Kuoppala 4278d02b98b8SOscar Mateo /* 4279d02b98b8SOscar Mateo * RPS interrupts will get enabled/disabled on demand when RPS itself 4280d02b98b8SOscar Mateo * is enabled/disabled. 4281d02b98b8SOscar Mateo */ 4282d02b98b8SOscar Mateo dev_priv->pm_ier = 0x0; 4283d02b98b8SOscar Mateo dev_priv->pm_imr = ~dev_priv->pm_ier; 4284d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4285d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 428651951ae7SMika Kuoppala } 428751951ae7SMika Kuoppala 428831604222SAnusha Srivatsa static void icp_irq_postinstall(struct drm_device *dev) 428931604222SAnusha Srivatsa { 429031604222SAnusha Srivatsa struct drm_i915_private *dev_priv = to_i915(dev); 429131604222SAnusha Srivatsa u32 mask = SDE_GMBUS_ICP; 429231604222SAnusha Srivatsa 429331604222SAnusha Srivatsa WARN_ON(I915_READ(SDEIER) != 0); 429431604222SAnusha Srivatsa I915_WRITE(SDEIER, 0xffffffff); 429531604222SAnusha Srivatsa POSTING_READ(SDEIER); 429631604222SAnusha Srivatsa 429731604222SAnusha Srivatsa gen3_assert_iir_is_zero(dev_priv, SDEIIR); 429831604222SAnusha Srivatsa I915_WRITE(SDEIMR, ~mask); 429931604222SAnusha Srivatsa 430031604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 430131604222SAnusha Srivatsa } 430231604222SAnusha Srivatsa 430351951ae7SMika Kuoppala static int gen11_irq_postinstall(struct drm_device *dev) 430451951ae7SMika Kuoppala { 430551951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 4306df0d28c1SDhinakaran Pandiyan u32 gu_misc_masked = GEN11_GU_MISC_GSE; 430751951ae7SMika Kuoppala 430831604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 430931604222SAnusha Srivatsa icp_irq_postinstall(dev); 431031604222SAnusha Srivatsa 431151951ae7SMika Kuoppala gen11_gt_irq_postinstall(dev_priv); 431251951ae7SMika Kuoppala gen8_de_irq_postinstall(dev_priv); 431351951ae7SMika Kuoppala 4314df0d28c1SDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4315df0d28c1SDhinakaran Pandiyan 431651951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 431751951ae7SMika Kuoppala 431851951ae7SMika Kuoppala I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 431951951ae7SMika Kuoppala POSTING_READ(GEN11_GFX_MSTR_IRQ); 432051951ae7SMika Kuoppala 432151951ae7SMika Kuoppala return 0; 432251951ae7SMika Kuoppala } 432351951ae7SMika Kuoppala 432443f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev) 432543f328d7SVille Syrjälä { 4326fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 432743f328d7SVille Syrjälä 432843f328d7SVille Syrjälä gen8_gt_irq_postinstall(dev_priv); 432943f328d7SVille Syrjälä 4330ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 43319918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4332ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4333ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4334ad22d106SVille Syrjälä 4335e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 433643f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 
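	/*
	 * The master enable above is written (and flushed with a posting
	 * read) only after the GT and display sub-level registers have
	 * been set up.
	 */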
433743f328d7SVille Syrjälä 433843f328d7SVille Syrjälä return 0; 433943f328d7SVille Syrjälä } 434043f328d7SVille Syrjälä 43416bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev) 4342c2798b19SChris Wilson { 4343fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4344c2798b19SChris Wilson 434544d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 434644d9241eSVille Syrjälä 4347d420a50cSVille Syrjälä I915_WRITE16(HWSTAM, 0xffff); 4348d420a50cSVille Syrjälä 4349e9e9848aSVille Syrjälä GEN2_IRQ_RESET(); 4350c2798b19SChris Wilson } 4351c2798b19SChris Wilson 4352c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 4353c2798b19SChris Wilson { 4354fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4355e9e9848aSVille Syrjälä u16 enable_mask; 4356c2798b19SChris Wilson 4357045cebd2SVille Syrjälä I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4358045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 4359c2798b19SChris Wilson 4360c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 4361c2798b19SChris Wilson dev_priv->irq_mask = 4362c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 436316659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 436416659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4365c2798b19SChris Wilson 4366e9e9848aSVille Syrjälä enable_mask = 4367c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4368c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 436916659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4370e9e9848aSVille Syrjälä I915_USER_INTERRUPT; 4371e9e9848aSVille Syrjälä 4372e9e9848aSVille Syrjälä GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4373c2798b19SChris Wilson 4374379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4375379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 4376d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4377755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4378755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4379d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4380379ef82dSDaniel Vetter 4381c2798b19SChris Wilson return 0; 4382c2798b19SChris Wilson } 4383c2798b19SChris Wilson 438478c357ddSVille Syrjälä static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 438578c357ddSVille Syrjälä u16 *eir, u16 *eir_stuck) 438678c357ddSVille Syrjälä { 438778c357ddSVille Syrjälä u16 emr; 438878c357ddSVille Syrjälä 438978c357ddSVille Syrjälä *eir = I915_READ16(EIR); 439078c357ddSVille Syrjälä 439178c357ddSVille Syrjälä if (*eir) 439278c357ddSVille Syrjälä I915_WRITE16(EIR, *eir); 439378c357ddSVille Syrjälä 439478c357ddSVille Syrjälä *eir_stuck = I915_READ16(EIR); 439578c357ddSVille Syrjälä if (*eir_stuck == 0) 439678c357ddSVille Syrjälä return; 439778c357ddSVille Syrjälä 439878c357ddSVille Syrjälä /* 439978c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 440078c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 440178c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 440278c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 440378c357ddSVille Syrjälä * is still pending. 
Also some EIR bits can't be 440478c357ddSVille Syrjälä * cleared except by handling the underlying error 440578c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 440678c357ddSVille Syrjälä * remains set. 440778c357ddSVille Syrjälä */ 440878c357ddSVille Syrjälä emr = I915_READ16(EMR); 440978c357ddSVille Syrjälä I915_WRITE16(EMR, 0xffff); 441078c357ddSVille Syrjälä I915_WRITE16(EMR, emr | *eir_stuck); 441178c357ddSVille Syrjälä } 441278c357ddSVille Syrjälä 441378c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 441478c357ddSVille Syrjälä u16 eir, u16 eir_stuck) 441578c357ddSVille Syrjälä { 441678c357ddSVille Syrjälä DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 441778c357ddSVille Syrjälä 441878c357ddSVille Syrjälä if (eir_stuck) 441978c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 442078c357ddSVille Syrjälä } 442178c357ddSVille Syrjälä 442278c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 442378c357ddSVille Syrjälä u32 *eir, u32 *eir_stuck) 442478c357ddSVille Syrjälä { 442578c357ddSVille Syrjälä u32 emr; 442678c357ddSVille Syrjälä 442778c357ddSVille Syrjälä *eir = I915_READ(EIR); 442878c357ddSVille Syrjälä 442978c357ddSVille Syrjälä I915_WRITE(EIR, *eir); 443078c357ddSVille Syrjälä 443178c357ddSVille Syrjälä *eir_stuck = I915_READ(EIR); 443278c357ddSVille Syrjälä if (*eir_stuck == 0) 443378c357ddSVille Syrjälä return; 443478c357ddSVille Syrjälä 443578c357ddSVille Syrjälä /* 443678c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 443778c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 443878c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 443978c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 444078c357ddSVille Syrjälä * is still pending. Also some EIR bits can't be 444178c357ddSVille Syrjälä * cleared except by handling the underlying error 444278c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 444378c357ddSVille Syrjälä * remains set. 
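 * Whatever remains set in EIR at this point is reported as "stuck"
 * (and left masked) by i9xx_error_irq_handler() below.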
444478c357ddSVille Syrjälä */ 444578c357ddSVille Syrjälä emr = I915_READ(EMR); 444678c357ddSVille Syrjälä I915_WRITE(EMR, 0xffffffff); 444778c357ddSVille Syrjälä I915_WRITE(EMR, emr | *eir_stuck); 444878c357ddSVille Syrjälä } 444978c357ddSVille Syrjälä 445078c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 445178c357ddSVille Syrjälä u32 eir, u32 eir_stuck) 445278c357ddSVille Syrjälä { 445378c357ddSVille Syrjälä DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 445478c357ddSVille Syrjälä 445578c357ddSVille Syrjälä if (eir_stuck) 445678c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 445778c357ddSVille Syrjälä } 445878c357ddSVille Syrjälä 4459ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4460c2798b19SChris Wilson { 446145a83f84SDaniel Vetter struct drm_device *dev = arg; 4462fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4463af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4464c2798b19SChris Wilson 44652dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 44662dd2a883SImre Deak return IRQ_NONE; 44672dd2a883SImre Deak 44681f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 44691f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 44701f814dacSImre Deak 4471af722d28SVille Syrjälä do { 4472af722d28SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 447378c357ddSVille Syrjälä u16 eir = 0, eir_stuck = 0; 4474af722d28SVille Syrjälä u16 iir; 4475af722d28SVille Syrjälä 4476c2798b19SChris Wilson iir = I915_READ16(IIR); 4477c2798b19SChris Wilson if (iir == 0) 4478af722d28SVille Syrjälä break; 4479c2798b19SChris Wilson 4480af722d28SVille Syrjälä ret = IRQ_HANDLED; 4481c2798b19SChris Wilson 4482eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4483eb64343cSVille Syrjälä * signalled in iir */ 4484eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4485c2798b19SChris Wilson 448678c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 448778c357ddSVille Syrjälä i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 448878c357ddSVille Syrjälä 4489fd3a4024SDaniel Vetter I915_WRITE16(IIR, iir); 4490c2798b19SChris Wilson 4491c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 44923b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4493c2798b19SChris Wilson 449478c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 449578c357ddSVille Syrjälä i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4496af722d28SVille Syrjälä 4497eb64343cSVille Syrjälä i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4498af722d28SVille Syrjälä } while (0); 4499c2798b19SChris Wilson 45001f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 45011f814dacSImre Deak 45021f814dacSImre Deak return ret; 4503c2798b19SChris Wilson } 4504c2798b19SChris Wilson 45056bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev) 4506a266c7d5SChris Wilson { 4507fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4508a266c7d5SChris Wilson 450956b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 45100706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4511a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4512a266c7d5SChris Wilson } 4513a266c7d5SChris Wilson 451444d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 451544d9241eSVille Syrjälä 4516d420a50cSVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 451744d9241eSVille Syrjälä 
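/* Mask (IMR) and disable (IER) the remaining base interrupts and ack anything still pending in IIR. */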
4518ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4519a266c7d5SChris Wilson } 4520a266c7d5SChris Wilson 4521a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 4522a266c7d5SChris Wilson { 4523fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 452438bde180SChris Wilson u32 enable_mask; 4525a266c7d5SChris Wilson 4526045cebd2SVille Syrjälä I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4527045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 452838bde180SChris Wilson 452938bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 453038bde180SChris Wilson dev_priv->irq_mask = 453138bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 453238bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 453316659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 453416659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 453538bde180SChris Wilson 453638bde180SChris Wilson enable_mask = 453738bde180SChris Wilson I915_ASLE_INTERRUPT | 453838bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 453938bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 454016659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 454138bde180SChris Wilson I915_USER_INTERRUPT; 454238bde180SChris Wilson 454356b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 4544a266c7d5SChris Wilson /* Enable in IER... */ 4545a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4546a266c7d5SChris Wilson /* and unmask in IMR */ 4547a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4548a266c7d5SChris Wilson } 4549a266c7d5SChris Wilson 4550ba7eb789SVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4551a266c7d5SChris Wilson 4552379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4553379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4554d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4555755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4556755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4557d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4558379ef82dSDaniel Vetter 4559c30bb1fdSVille Syrjälä i915_enable_asle_pipestat(dev_priv); 4560c30bb1fdSVille Syrjälä 456120afbda2SDaniel Vetter return 0; 456220afbda2SDaniel Vetter } 456320afbda2SDaniel Vetter 4564ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 4565a266c7d5SChris Wilson { 456645a83f84SDaniel Vetter struct drm_device *dev = arg; 4567fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4568af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4569a266c7d5SChris Wilson 45702dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 45712dd2a883SImre Deak return IRQ_NONE; 45722dd2a883SImre Deak 45731f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 45741f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 45751f814dacSImre Deak 457638bde180SChris Wilson do { 4577eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 457878c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4579af722d28SVille Syrjälä u32 hotplug_status = 0; 4580af722d28SVille Syrjälä u32 iir; 4581a266c7d5SChris Wilson 4582af722d28SVille Syrjälä iir = I915_READ(IIR); 4583af722d28SVille Syrjälä if (iir == 0) 4584af722d28SVille Syrjälä break; 4585af722d28SVille Syrjälä 4586af722d28SVille Syrjälä ret = IRQ_HANDLED; 4587af722d28SVille Syrjälä 4588af722d28SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv) && 4589af722d28SVille Syrjälä iir & I915_DISPLAY_PORT_INTERRUPT) 4590af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4591a266c7d5SChris Wilson 4592eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4593eb64343cSVille Syrjälä * signalled in iir */ 4594eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4595a266c7d5SChris Wilson 459678c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 459778c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 459878c357ddSVille Syrjälä 4599fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4600a266c7d5SChris Wilson 4601a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 46023b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4603a266c7d5SChris Wilson 460478c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 460578c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4606a266c7d5SChris Wilson 4607af722d28SVille Syrjälä if (hotplug_status) 4608af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4609af722d28SVille Syrjälä 4610af722d28SVille Syrjälä i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4611af722d28SVille Syrjälä } while (0); 4612a266c7d5SChris Wilson 46131f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 46141f814dacSImre Deak 4615a266c7d5SChris Wilson return ret; 4616a266c7d5SChris Wilson } 4617a266c7d5SChris Wilson 46186bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev) 4619a266c7d5SChris Wilson { 4620fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4621a266c7d5SChris Wilson 46220706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4623a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4624a266c7d5SChris Wilson 462544d9241eSVille Syrjälä 
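/* Drop any stale per-pipe PIPESTAT enables/status before the rest of the interrupt registers are torn down. */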
i9xx_pipestat_irq_reset(dev_priv); 462644d9241eSVille Syrjälä 4627d420a50cSVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 462844d9241eSVille Syrjälä 4629ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4630a266c7d5SChris Wilson } 4631a266c7d5SChris Wilson 4632a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 4633a266c7d5SChris Wilson { 4634fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4635bbba0a97SChris Wilson u32 enable_mask; 4636a266c7d5SChris Wilson u32 error_mask; 4637a266c7d5SChris Wilson 4638045cebd2SVille Syrjälä /* 4639045cebd2SVille Syrjälä * Enable some error detection, note the instruction error mask 4640045cebd2SVille Syrjälä * bit is reserved, so we leave it masked. 4641045cebd2SVille Syrjälä */ 4642045cebd2SVille Syrjälä if (IS_G4X(dev_priv)) { 4643045cebd2SVille Syrjälä error_mask = ~(GM45_ERROR_PAGE_TABLE | 4644045cebd2SVille Syrjälä GM45_ERROR_MEM_PRIV | 4645045cebd2SVille Syrjälä GM45_ERROR_CP_PRIV | 4646045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4647045cebd2SVille Syrjälä } else { 4648045cebd2SVille Syrjälä error_mask = ~(I915_ERROR_PAGE_TABLE | 4649045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4650045cebd2SVille Syrjälä } 4651045cebd2SVille Syrjälä I915_WRITE(EMR, error_mask); 4652045cebd2SVille Syrjälä 4653a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. */ 4654c30bb1fdSVille Syrjälä dev_priv->irq_mask = 4655c30bb1fdSVille Syrjälä ~(I915_ASLE_INTERRUPT | 4656adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 4657bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4658bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 465978c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4660bbba0a97SChris Wilson 4661c30bb1fdSVille Syrjälä enable_mask = 4662c30bb1fdSVille Syrjälä I915_ASLE_INTERRUPT | 4663c30bb1fdSVille Syrjälä I915_DISPLAY_PORT_INTERRUPT | 4664c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4665c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 466678c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4667c30bb1fdSVille Syrjälä I915_USER_INTERRUPT; 4668bbba0a97SChris Wilson 466991d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4670bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 4671a266c7d5SChris Wilson 4672c30bb1fdSVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4673c30bb1fdSVille Syrjälä 4674b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4675b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4676d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4677755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4678755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4679755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4680d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4681a266c7d5SChris Wilson 468291d14251STvrtko Ursulin i915_enable_asle_pipestat(dev_priv); 468320afbda2SDaniel Vetter 468420afbda2SDaniel Vetter return 0; 468520afbda2SDaniel Vetter } 468620afbda2SDaniel Vetter 468791d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 468820afbda2SDaniel Vetter { 468920afbda2SDaniel Vetter u32 hotplug_en; 469020afbda2SDaniel Vetter 469167520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4692b5ea2d56SDaniel Vetter 4693adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 4694e5868a31SEgbert Eich /* enable bits are the same for all generations */ 469591d14251STvrtko Ursulin hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4696a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 4697a266c7d5SChris Wilson to generate a spurious hotplug event about three 4698a266c7d5SChris Wilson seconds later. So just do it once. 4699a266c7d5SChris Wilson */ 470091d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4701a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4702a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4703a266c7d5SChris Wilson 4704a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 47050706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(dev_priv, 4706f9e3dc78SJani Nikula HOTPLUG_INT_EN_MASK | 4707f9e3dc78SJani Nikula CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4708f9e3dc78SJani Nikula CRT_HOTPLUG_ACTIVATION_PERIOD_64, 47090706f17cSEgbert Eich hotplug_en); 4710a266c7d5SChris Wilson } 4711a266c7d5SChris Wilson 4712ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 4713a266c7d5SChris Wilson { 471445a83f84SDaniel Vetter struct drm_device *dev = arg; 4715fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4716af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4717a266c7d5SChris Wilson 47182dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 47192dd2a883SImre Deak return IRQ_NONE; 47202dd2a883SImre Deak 47211f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 47221f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 47231f814dacSImre Deak 4724af722d28SVille Syrjälä do { 4725eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 472678c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4727af722d28SVille Syrjälä u32 hotplug_status = 0; 4728af722d28SVille Syrjälä u32 iir; 47292c8ba29fSChris Wilson 4730af722d28SVille Syrjälä iir = I915_READ(IIR); 4731af722d28SVille Syrjälä if (iir == 0) 4732af722d28SVille Syrjälä break; 4733af722d28SVille Syrjälä 4734af722d28SVille Syrjälä ret = IRQ_HANDLED; 4735af722d28SVille Syrjälä 4736af722d28SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 4737af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4738a266c7d5SChris Wilson 4739eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4740eb64343cSVille Syrjälä * signalled in iir */ 4741eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4742a266c7d5SChris Wilson 474378c357ddSVille Syrjälä if (iir & 
I915_MASTER_ERROR_INTERRUPT) 474478c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 474578c357ddSVille Syrjälä 4746fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4747a266c7d5SChris Wilson 4748a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 47493b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4750af722d28SVille Syrjälä 4751a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 47523b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 4753a266c7d5SChris Wilson 475478c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 475578c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4756515ac2bbSDaniel Vetter 4757af722d28SVille Syrjälä if (hotplug_status) 4758af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4759af722d28SVille Syrjälä 4760af722d28SVille Syrjälä i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4761af722d28SVille Syrjälä } while (0); 4762a266c7d5SChris Wilson 47631f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 47641f814dacSImre Deak 4765a266c7d5SChris Wilson return ret; 4766a266c7d5SChris Wilson } 4767a266c7d5SChris Wilson 4768fca52a55SDaniel Vetter /** 4769fca52a55SDaniel Vetter * intel_irq_init - initializes irq support 4770fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4771fca52a55SDaniel Vetter * 4772fca52a55SDaniel Vetter * This function initializes all the irq support including work items, timers 4773fca52a55SDaniel Vetter * and all the vtables. It does not setup the interrupt itself though. 4774fca52a55SDaniel Vetter */ 4775b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv) 4776f71d4af4SJesse Barnes { 477791c8a326SChris Wilson struct drm_device *dev = &dev_priv->drm; 4778562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 4779cefcff8fSJoonas Lahtinen int i; 47808b2e326dSChris Wilson 478177913b39SJani Nikula intel_hpd_init_work(dev_priv); 478277913b39SJani Nikula 4783562d9baeSSagar Arun Kamble INIT_WORK(&rps->work, gen6_pm_rps_work); 4784cefcff8fSJoonas Lahtinen 4785a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4786cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4787cefcff8fSJoonas Lahtinen dev_priv->l3_parity.remap_info[i] = NULL; 47888b2e326dSChris Wilson 47894805fe82STvrtko Ursulin if (HAS_GUC_SCHED(dev_priv)) 479026705e20SSagar Arun Kamble dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 479126705e20SSagar Arun Kamble 4792a6706b45SDeepak S /* Let's track the enabled rps events */ 4793666a4537SWayne Boyer if (IS_VALLEYVIEW(dev_priv)) 47946c65a587SVille Syrjälä /* WaGsvRC0ResidencyMethod:vlv */ 4795e0e8c7cbSChris Wilson dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 479631685c25SDeepak S else 47974668f695SChris Wilson dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | 47984668f695SChris Wilson GEN6_PM_RP_DOWN_THRESHOLD | 47994668f695SChris Wilson GEN6_PM_RP_DOWN_TIMEOUT); 4800a6706b45SDeepak S 4801562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz = 0; 48021800ad25SSagar Arun Kamble 48031800ad25SSagar Arun Kamble /* 4804acf2dc22SMika Kuoppala * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 48051800ad25SSagar Arun Kamble * if GEN6_PM_UP_EI_EXPIRED is masked. 48061800ad25SSagar Arun Kamble * 48071800ad25SSagar Arun Kamble * TODO: verify if this can be reproduced on VLV,CHV. 
48081800ad25SSagar Arun Kamble */
4809bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) <= 7)
4810562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
48111800ad25SSagar Arun Kamble
4812bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8)
4813562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
48141800ad25SSagar Arun Kamble
4815b963291cSDaniel Vetter if (IS_GEN2(dev_priv)) {
48164194c088SRodrigo Vivi /* Gen2 doesn't have a hardware frame counter */
48174cdb83ecSVille Syrjälä dev->max_vblank_count = 0;
4818bca2bf2aSPandiyan, Dhinakaran } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4819f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4820fd8f507cSVille Syrjälä dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4821391f75e2SVille Syrjälä } else {
4822391f75e2SVille Syrjälä dev->driver->get_vblank_counter = i915_get_vblank_counter;
4823391f75e2SVille Syrjälä dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4824f71d4af4SJesse Barnes }
4825f71d4af4SJesse Barnes
482621da2700SVille Syrjälä /*
482721da2700SVille Syrjälä * Opt out of the vblank disable timer on everything except gen2.
482821da2700SVille Syrjälä * Gen2 doesn't have a hardware frame counter and so depends on
482921da2700SVille Syrjälä * vblank interrupts to produce sane vblank sequence numbers.
483021da2700SVille Syrjälä */
4831b963291cSDaniel Vetter if (!IS_GEN2(dev_priv))
483221da2700SVille Syrjälä dev->vblank_disable_immediate = true;
483321da2700SVille Syrjälä
4834262fd485SChris Wilson /* Most platforms treat the display irq block as an always-on
4835262fd485SChris Wilson * power domain. vlv/chv can disable it at runtime and need
4836262fd485SChris Wilson * special care to avoid writing any of the display block registers
4837262fd485SChris Wilson * outside of the power domain. We defer setting up the display irqs
4838262fd485SChris Wilson * in this case to the runtime pm.
4839262fd485SChris Wilson */ 4840262fd485SChris Wilson dev_priv->display_irqs_enabled = true; 4841262fd485SChris Wilson if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4842262fd485SChris Wilson dev_priv->display_irqs_enabled = false; 4843262fd485SChris Wilson 4844317eaa95SLyude dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4845317eaa95SLyude 48461bf6ad62SDaniel Vetter dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4847f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4848f71d4af4SJesse Barnes 4849b963291cSDaniel Vetter if (IS_CHERRYVIEW(dev_priv)) { 485043f328d7SVille Syrjälä dev->driver->irq_handler = cherryview_irq_handler; 48516bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = cherryview_irq_reset; 485243f328d7SVille Syrjälä dev->driver->irq_postinstall = cherryview_irq_postinstall; 48536bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = cherryview_irq_reset; 485486e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 485586e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 485643f328d7SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4857b963291cSDaniel Vetter } else if (IS_VALLEYVIEW(dev_priv)) { 48587e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 48596bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = valleyview_irq_reset; 48607e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 48616bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = valleyview_irq_reset; 486286e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 486386e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4864fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 486551951ae7SMika Kuoppala } else if (INTEL_GEN(dev_priv) >= 11) { 486651951ae7SMika Kuoppala dev->driver->irq_handler = gen11_irq_handler; 486751951ae7SMika Kuoppala dev->driver->irq_preinstall = gen11_irq_reset; 486851951ae7SMika Kuoppala dev->driver->irq_postinstall = gen11_irq_postinstall; 486951951ae7SMika Kuoppala dev->driver->irq_uninstall = gen11_irq_reset; 487051951ae7SMika Kuoppala dev->driver->enable_vblank = gen8_enable_vblank; 487151951ae7SMika Kuoppala dev->driver->disable_vblank = gen8_disable_vblank; 4872121e758eSDhinakaran Pandiyan dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4873bca2bf2aSPandiyan, Dhinakaran } else if (INTEL_GEN(dev_priv) >= 8) { 4874abd58f01SBen Widawsky dev->driver->irq_handler = gen8_irq_handler; 4875723761b8SDaniel Vetter dev->driver->irq_preinstall = gen8_irq_reset; 4876abd58f01SBen Widawsky dev->driver->irq_postinstall = gen8_irq_postinstall; 48776bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = gen8_irq_reset; 4878abd58f01SBen Widawsky dev->driver->enable_vblank = gen8_enable_vblank; 4879abd58f01SBen Widawsky dev->driver->disable_vblank = gen8_disable_vblank; 4880cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4881e0a20ad7SShashank Sharma dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 48827b22b8c4SRodrigo Vivi else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 48837b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 48846dbf30ceSVille Syrjälä dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 48856dbf30ceSVille Syrjälä else 48863a3b3c7dSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 48876e266956STvrtko Ursulin } else if (HAS_PCH_SPLIT(dev_priv)) { 4888f71d4af4SJesse Barnes dev->driver->irq_handler = 
ironlake_irq_handler; 4889723761b8SDaniel Vetter dev->driver->irq_preinstall = ironlake_irq_reset; 4890f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 48916bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = ironlake_irq_reset; 4892f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 4893f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 4894e4ce95aaSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4895f71d4af4SJesse Barnes } else { 48967e22dbbbSTvrtko Ursulin if (IS_GEN2(dev_priv)) { 48976bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i8xx_irq_reset; 4898c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 4899c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 49006bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i8xx_irq_reset; 490186e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 490286e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 49037e22dbbbSTvrtko Ursulin } else if (IS_GEN3(dev_priv)) { 49046bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i915_irq_reset; 4905a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 49066bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i915_irq_reset; 4907a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 490886e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 490986e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 4910c2798b19SChris Wilson } else { 49116bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i965_irq_reset; 4912a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 49136bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i965_irq_reset; 4914a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 491586e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 491686e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4917c2798b19SChris Wilson } 4918778eb334SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv)) 4919778eb334SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4920f71d4af4SJesse Barnes } 4921f71d4af4SJesse Barnes } 492220afbda2SDaniel Vetter 4923fca52a55SDaniel Vetter /** 4924cefcff8fSJoonas Lahtinen * intel_irq_fini - deinitializes IRQ support 4925cefcff8fSJoonas Lahtinen * @i915: i915 device instance 4926cefcff8fSJoonas Lahtinen * 4927cefcff8fSJoonas Lahtinen * This function deinitializes all the IRQ support. 4928cefcff8fSJoonas Lahtinen */ 4929cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915) 4930cefcff8fSJoonas Lahtinen { 4931cefcff8fSJoonas Lahtinen int i; 4932cefcff8fSJoonas Lahtinen 4933cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4934cefcff8fSJoonas Lahtinen kfree(i915->l3_parity.remap_info[i]); 4935cefcff8fSJoonas Lahtinen } 4936cefcff8fSJoonas Lahtinen 4937cefcff8fSJoonas Lahtinen /** 4938fca52a55SDaniel Vetter * intel_irq_install - enables the hardware interrupt 4939fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4940fca52a55SDaniel Vetter * 4941fca52a55SDaniel Vetter * This function enables the hardware interrupt handling, but leaves the hotplug 4942fca52a55SDaniel Vetter * handling still disabled. It is called after intel_irq_init(). 
4943fca52a55SDaniel Vetter *
4944fca52a55SDaniel Vetter * In the driver load and resume code we need working interrupts in a few places
4945fca52a55SDaniel Vetter * but don't want to deal with the hassle of concurrent probe and hotplug
4946fca52a55SDaniel Vetter * workers. Hence the split into this two-stage approach.
4947fca52a55SDaniel Vetter */
49482aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49492aeb7d3aSDaniel Vetter {
49502aeb7d3aSDaniel Vetter /*
49512aeb7d3aSDaniel Vetter * We enable some interrupt sources in our postinstall hooks, so mark
49522aeb7d3aSDaniel Vetter * interrupts as enabled _before_ actually enabling them to avoid
49532aeb7d3aSDaniel Vetter * special cases in our ordering checks.
49542aeb7d3aSDaniel Vetter */
4955ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true;
49562aeb7d3aSDaniel Vetter
495791c8a326SChris Wilson return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
49582aeb7d3aSDaniel Vetter }
49592aeb7d3aSDaniel Vetter
4960fca52a55SDaniel Vetter /**
4961fca52a55SDaniel Vetter * intel_irq_uninstall - finalizes all irq handling
4962fca52a55SDaniel Vetter * @dev_priv: i915 device instance
4963fca52a55SDaniel Vetter *
4964fca52a55SDaniel Vetter * This stops interrupt and hotplug handling and unregisters and frees all
4965fca52a55SDaniel Vetter * resources acquired in the init functions.
4966fca52a55SDaniel Vetter */
49672aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49682aeb7d3aSDaniel Vetter {
496991c8a326SChris Wilson drm_irq_uninstall(&dev_priv->drm);
49702aeb7d3aSDaniel Vetter intel_hpd_cancel_work(dev_priv);
4971ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false;
49722aeb7d3aSDaniel Vetter }
49732aeb7d3aSDaniel Vetter
4974fca52a55SDaniel Vetter /**
4975fca52a55SDaniel Vetter * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4976fca52a55SDaniel Vetter * @dev_priv: i915 device instance
4977fca52a55SDaniel Vetter *
4978fca52a55SDaniel Vetter * This function is used to disable interrupts at runtime, both in the runtime
4979fca52a55SDaniel Vetter * pm and the system suspend/resume code.
4980fca52a55SDaniel Vetter */
4981b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4982c67a470bSPaulo Zanoni {
498391c8a326SChris Wilson dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4984ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false;
498591c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq);
4986c67a470bSPaulo Zanoni }
4987c67a470bSPaulo Zanoni
4988fca52a55SDaniel Vetter /**
4989fca52a55SDaniel Vetter * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4990fca52a55SDaniel Vetter * @dev_priv: i915 device instance
4991fca52a55SDaniel Vetter *
4992fca52a55SDaniel Vetter * This function is used to enable interrupts at runtime, both in the runtime
4993fca52a55SDaniel Vetter * pm and the system suspend/resume code.
4994fca52a55SDaniel Vetter */
4995b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4996c67a470bSPaulo Zanoni {
4997ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true;
499891c8a326SChris Wilson dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
499991c8a326SChris Wilson dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
5000c67a470bSPaulo Zanoni }
5001
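/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * the kerneldoc above describes a two-stage bring-up.  A caller such
 * as the driver load path in i915_drv.c (assumed here purely for
 * illustration) would use these entry points roughly like so:
 *
 *	intel_irq_init(dev_priv);        - set up vtables, work items, timers
 *	intel_irq_install(dev_priv);     - request the interrupt line
 *	...
 *	intel_runtime_pm_disable_interrupts(dev_priv);  - runtime suspend
 *	intel_runtime_pm_enable_interrupts(dev_priv);   - runtime resume
 *	...
 *	intel_irq_uninstall(dev_priv);   - unload: free the irq, cancel hotplug work
 *	intel_irq_fini(dev_priv);        - free remaining state (l3 parity remap info)
 */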