1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2c0e09200SDave Airlie */ 3c0e09200SDave Airlie /* 4c0e09200SDave Airlie * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5c0e09200SDave Airlie * All Rights Reserved. 6c0e09200SDave Airlie * 7c0e09200SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining a 8c0e09200SDave Airlie * copy of this software and associated documentation files (the 9c0e09200SDave Airlie * "Software"), to deal in the Software without restriction, including 10c0e09200SDave Airlie * without limitation the rights to use, copy, modify, merge, publish, 11c0e09200SDave Airlie * distribute, sub license, and/or sell copies of the Software, and to 12c0e09200SDave Airlie * permit persons to whom the Software is furnished to do so, subject to 13c0e09200SDave Airlie * the following conditions: 14c0e09200SDave Airlie * 15c0e09200SDave Airlie * The above copyright notice and this permission notice (including the 16c0e09200SDave Airlie * next paragraph) shall be included in all copies or substantial portions 17c0e09200SDave Airlie * of the Software. 18c0e09200SDave Airlie * 19c0e09200SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20c0e09200SDave Airlie * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21c0e09200SDave Airlie * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22c0e09200SDave Airlie * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23c0e09200SDave Airlie * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24c0e09200SDave Airlie * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25c0e09200SDave Airlie * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26c0e09200SDave Airlie * 27c0e09200SDave Airlie */ 28c0e09200SDave Airlie 29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30a70491ccSJoe Perches 3163eeaf38SJesse Barnes #include <linux/sysrq.h> 325a0e3ad6STejun Heo #include <linux/slab.h> 33b2c88f5bSDamien Lespiau #include <linux/circ_buf.h> 34760285e7SDavid Howells #include <drm/drmP.h> 35760285e7SDavid Howells #include <drm/i915_drm.h> 36c0e09200SDave Airlie #include "i915_drv.h" 371c5d22f7SChris Wilson #include "i915_trace.h" 3879e53945SJesse Barnes #include "intel_drv.h" 39c0e09200SDave Airlie 40fca52a55SDaniel Vetter /** 41fca52a55SDaniel Vetter * DOC: interrupt handling 42fca52a55SDaniel Vetter * 43fca52a55SDaniel Vetter * These functions provide the basic support for enabling and disabling the 44fca52a55SDaniel Vetter * interrupt handling support. There's a lot more functionality in i915_irq.c 45fca52a55SDaniel Vetter * and related files, but that will be described in separate chapters. 
46fca52a55SDaniel Vetter */ 47fca52a55SDaniel Vetter 48e4ce95aaSVille Syrjälä static const u32 hpd_ilk[HPD_NUM_PINS] = { 49e4ce95aaSVille Syrjälä [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50e4ce95aaSVille Syrjälä }; 51e4ce95aaSVille Syrjälä 5223bb4cb5SVille Syrjälä static const u32 hpd_ivb[HPD_NUM_PINS] = { 5323bb4cb5SVille Syrjälä [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 5423bb4cb5SVille Syrjälä }; 5523bb4cb5SVille Syrjälä 563a3b3c7dSVille Syrjälä static const u32 hpd_bdw[HPD_NUM_PINS] = { 573a3b3c7dSVille Syrjälä [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 583a3b3c7dSVille Syrjälä }; 593a3b3c7dSVille Syrjälä 607c7e10dbSVille Syrjälä static const u32 hpd_ibx[HPD_NUM_PINS] = { 61e5868a31SEgbert Eich [HPD_CRT] = SDE_CRT_HOTPLUG, 62e5868a31SEgbert Eich [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63e5868a31SEgbert Eich [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64e5868a31SEgbert Eich [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65e5868a31SEgbert Eich [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66e5868a31SEgbert Eich }; 67e5868a31SEgbert Eich 687c7e10dbSVille Syrjälä static const u32 hpd_cpt[HPD_NUM_PINS] = { 69e5868a31SEgbert Eich [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 7073c352a2SDaniel Vetter [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71e5868a31SEgbert Eich [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72e5868a31SEgbert Eich [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73e5868a31SEgbert Eich [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74e5868a31SEgbert Eich }; 75e5868a31SEgbert Eich 7626951cafSXiong Zhang static const u32 hpd_spt[HPD_NUM_PINS] = { 7774c0b395SVille Syrjälä [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 7826951cafSXiong Zhang [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 7926951cafSXiong Zhang [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 8026951cafSXiong Zhang [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 8126951cafSXiong Zhang [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 8226951cafSXiong Zhang }; 8326951cafSXiong Zhang 847c7e10dbSVille Syrjälä static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91e5868a31SEgbert Eich }; 92e5868a31SEgbert Eich 937c7e10dbSVille Syrjälä static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100e5868a31SEgbert Eich }; 101e5868a31SEgbert Eich 1024bca26d0SVille Syrjälä static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103e5868a31SEgbert Eich [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104e5868a31SEgbert Eich [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105e5868a31SEgbert Eich [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106e5868a31SEgbert Eich [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107e5868a31SEgbert Eich [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108e5868a31SEgbert Eich [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109e5868a31SEgbert Eich }; 110e5868a31SEgbert Eich 111e0a20ad7SShashank Sharma /* BXT hpd list */ 112e0a20ad7SShashank Sharma static const u32 hpd_bxt[HPD_NUM_PINS] = { 1137f3561beSSonika Jindal [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114e0a20ad7SShashank Sharma 
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115e0a20ad7SShashank Sharma [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116e0a20ad7SShashank Sharma }; 117e0a20ad7SShashank Sharma 118b796b971SDhinakaran Pandiyan static const u32 hpd_gen11[HPD_NUM_PINS] = { 119b796b971SDhinakaran Pandiyan [HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG, 120b796b971SDhinakaran Pandiyan [HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG, 121b796b971SDhinakaran Pandiyan [HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG, 122b796b971SDhinakaran Pandiyan [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG 123121e758eSDhinakaran Pandiyan }; 124121e758eSDhinakaran Pandiyan 12531604222SAnusha Srivatsa static const u32 hpd_icp[HPD_NUM_PINS] = { 12631604222SAnusha Srivatsa [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, 12731604222SAnusha Srivatsa [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, 12831604222SAnusha Srivatsa [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, 12931604222SAnusha Srivatsa [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, 13031604222SAnusha Srivatsa [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, 13131604222SAnusha Srivatsa [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP 13231604222SAnusha Srivatsa }; 13331604222SAnusha Srivatsa 1345c502442SPaulo Zanoni /* IIR can theoretically queue up two events. Be paranoid. */ 135f86f3fb0SPaulo Zanoni #define GEN8_IRQ_RESET_NDX(type, which) do { \ 1365c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 1375c502442SPaulo Zanoni POSTING_READ(GEN8_##type##_IMR(which)); \ 1385c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IER(which), 0); \ 1395c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 1405c502442SPaulo Zanoni POSTING_READ(GEN8_##type##_IIR(which)); \ 1415c502442SPaulo Zanoni I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 1425c502442SPaulo Zanoni POSTING_READ(GEN8_##type##_IIR(which)); \ 1435c502442SPaulo Zanoni } while (0) 1445c502442SPaulo Zanoni 1453488d4ebSVille Syrjälä #define GEN3_IRQ_RESET(type) do { \ 146a9d356a6SPaulo Zanoni I915_WRITE(type##IMR, 0xffffffff); \ 1475c502442SPaulo Zanoni POSTING_READ(type##IMR); \ 148a9d356a6SPaulo Zanoni I915_WRITE(type##IER, 0); \ 1495c502442SPaulo Zanoni I915_WRITE(type##IIR, 0xffffffff); \ 1505c502442SPaulo Zanoni POSTING_READ(type##IIR); \ 1515c502442SPaulo Zanoni I915_WRITE(type##IIR, 0xffffffff); \ 1525c502442SPaulo Zanoni POSTING_READ(type##IIR); \ 153a9d356a6SPaulo Zanoni } while (0) 154a9d356a6SPaulo Zanoni 155e9e9848aSVille Syrjälä #define GEN2_IRQ_RESET(type) do { \ 156e9e9848aSVille Syrjälä I915_WRITE16(type##IMR, 0xffff); \ 157e9e9848aSVille Syrjälä POSTING_READ16(type##IMR); \ 158e9e9848aSVille Syrjälä I915_WRITE16(type##IER, 0); \ 159e9e9848aSVille Syrjälä I915_WRITE16(type##IIR, 0xffff); \ 160e9e9848aSVille Syrjälä POSTING_READ16(type##IIR); \ 161e9e9848aSVille Syrjälä I915_WRITE16(type##IIR, 0xffff); \ 162e9e9848aSVille Syrjälä POSTING_READ16(type##IIR); \ 163e9e9848aSVille Syrjälä } while (0) 164e9e9848aSVille Syrjälä 165337ba017SPaulo Zanoni /* 166337ba017SPaulo Zanoni * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
167337ba017SPaulo Zanoni */ 1683488d4ebSVille Syrjälä static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, 169f0f59a00SVille Syrjälä i915_reg_t reg) 170b51a2842SVille Syrjälä { 171b51a2842SVille Syrjälä u32 val = I915_READ(reg); 172b51a2842SVille Syrjälä 173b51a2842SVille Syrjälä if (val == 0) 174b51a2842SVille Syrjälä return; 175b51a2842SVille Syrjälä 176b51a2842SVille Syrjälä WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 177f0f59a00SVille Syrjälä i915_mmio_reg_offset(reg), val); 178b51a2842SVille Syrjälä I915_WRITE(reg, 0xffffffff); 179b51a2842SVille Syrjälä POSTING_READ(reg); 180b51a2842SVille Syrjälä I915_WRITE(reg, 0xffffffff); 181b51a2842SVille Syrjälä POSTING_READ(reg); 182b51a2842SVille Syrjälä } 183337ba017SPaulo Zanoni 184e9e9848aSVille Syrjälä static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, 185e9e9848aSVille Syrjälä i915_reg_t reg) 186e9e9848aSVille Syrjälä { 187e9e9848aSVille Syrjälä u16 val = I915_READ16(reg); 188e9e9848aSVille Syrjälä 189e9e9848aSVille Syrjälä if (val == 0) 190e9e9848aSVille Syrjälä return; 191e9e9848aSVille Syrjälä 192e9e9848aSVille Syrjälä WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 193e9e9848aSVille Syrjälä i915_mmio_reg_offset(reg), val); 194e9e9848aSVille Syrjälä I915_WRITE16(reg, 0xffff); 195e9e9848aSVille Syrjälä POSTING_READ16(reg); 196e9e9848aSVille Syrjälä I915_WRITE16(reg, 0xffff); 197e9e9848aSVille Syrjälä POSTING_READ16(reg); 198e9e9848aSVille Syrjälä } 199e9e9848aSVille Syrjälä 20035079899SPaulo Zanoni #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 2013488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 20235079899SPaulo Zanoni I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 2037d1bd539SVille Syrjälä I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 2047d1bd539SVille Syrjälä POSTING_READ(GEN8_##type##_IMR(which)); \ 20535079899SPaulo Zanoni } while (0) 20635079899SPaulo Zanoni 2073488d4ebSVille Syrjälä #define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ 2083488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, type##IIR); \ 20935079899SPaulo Zanoni I915_WRITE(type##IER, (ier_val)); \ 2107d1bd539SVille Syrjälä I915_WRITE(type##IMR, (imr_val)); \ 2117d1bd539SVille Syrjälä POSTING_READ(type##IMR); \ 21235079899SPaulo Zanoni } while (0) 21335079899SPaulo Zanoni 214e9e9848aSVille Syrjälä #define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ 215e9e9848aSVille Syrjälä gen2_assert_iir_is_zero(dev_priv, type##IIR); \ 216e9e9848aSVille Syrjälä I915_WRITE16(type##IER, (ier_val)); \ 217e9e9848aSVille Syrjälä I915_WRITE16(type##IMR, (imr_val)); \ 218e9e9848aSVille Syrjälä POSTING_READ16(type##IMR); \ 219e9e9848aSVille Syrjälä } while (0) 220e9e9848aSVille Syrjälä 221c9a9a268SImre Deak static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 22226705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 223c9a9a268SImre Deak 2240706f17cSEgbert Eich /* For display hotplug interrupt */ 2250706f17cSEgbert Eich static inline void 2260706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 2270706f17cSEgbert Eich uint32_t mask, 2280706f17cSEgbert Eich uint32_t bits) 2290706f17cSEgbert Eich { 2300706f17cSEgbert Eich uint32_t val; 2310706f17cSEgbert Eich 23267520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 2330706f17cSEgbert Eich WARN_ON(bits & ~mask); 2340706f17cSEgbert Eich 2350706f17cSEgbert Eich val = 
        I915_READ(PORT_HOTPLUG_EN);
        val &= ~mask;
        val |= bits;
        I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To prevent the read-modify-write cycles
 * from interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
                                   uint32_t mask,
                                   uint32_t bits)
{
        spin_lock_irq(&dev_priv->irq_lock);
        i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
        spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
                         const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
                                const unsigned int bank,
                                const unsigned int bit)
{
        void __iomem * const regs = i915->regs;
        u32 dw;

        lockdep_assert_held(&i915->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(i915, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
28896606f3bSOscar Mateo */ 28996606f3bSOscar Mateo raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 29096606f3bSOscar Mateo 29196606f3bSOscar Mateo return true; 29296606f3bSOscar Mateo } 29396606f3bSOscar Mateo 29496606f3bSOscar Mateo return false; 29596606f3bSOscar Mateo } 29696606f3bSOscar Mateo 297d9dc34f1SVille Syrjälä /** 298d9dc34f1SVille Syrjälä * ilk_update_display_irq - update DEIMR 299d9dc34f1SVille Syrjälä * @dev_priv: driver private 300d9dc34f1SVille Syrjälä * @interrupt_mask: mask of interrupt bits to update 301d9dc34f1SVille Syrjälä * @enabled_irq_mask: mask of interrupt bits to enable 302d9dc34f1SVille Syrjälä */ 303fbdedaeaSVille Syrjälä void ilk_update_display_irq(struct drm_i915_private *dev_priv, 304d9dc34f1SVille Syrjälä uint32_t interrupt_mask, 305d9dc34f1SVille Syrjälä uint32_t enabled_irq_mask) 306036a4a7dSZhenyu Wang { 307d9dc34f1SVille Syrjälä uint32_t new_val; 308d9dc34f1SVille Syrjälä 30967520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 3104bc9d430SDaniel Vetter 311d9dc34f1SVille Syrjälä WARN_ON(enabled_irq_mask & ~interrupt_mask); 312d9dc34f1SVille Syrjälä 3139df7575fSJesse Barnes if (WARN_ON(!intel_irqs_enabled(dev_priv))) 314c67a470bSPaulo Zanoni return; 315c67a470bSPaulo Zanoni 316d9dc34f1SVille Syrjälä new_val = dev_priv->irq_mask; 317d9dc34f1SVille Syrjälä new_val &= ~interrupt_mask; 318d9dc34f1SVille Syrjälä new_val |= (~enabled_irq_mask & interrupt_mask); 319d9dc34f1SVille Syrjälä 320d9dc34f1SVille Syrjälä if (new_val != dev_priv->irq_mask) { 321d9dc34f1SVille Syrjälä dev_priv->irq_mask = new_val; 3221ec14ad3SChris Wilson I915_WRITE(DEIMR, dev_priv->irq_mask); 3233143a2bfSChris Wilson POSTING_READ(DEIMR); 324036a4a7dSZhenyu Wang } 325036a4a7dSZhenyu Wang } 326036a4a7dSZhenyu Wang 32743eaea13SPaulo Zanoni /** 32843eaea13SPaulo Zanoni * ilk_update_gt_irq - update GTIMR 32943eaea13SPaulo Zanoni * @dev_priv: driver private 33043eaea13SPaulo Zanoni * @interrupt_mask: mask of interrupt bits to update 33143eaea13SPaulo Zanoni * @enabled_irq_mask: mask of interrupt bits to enable 33243eaea13SPaulo Zanoni */ 33343eaea13SPaulo Zanoni static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 33443eaea13SPaulo Zanoni uint32_t interrupt_mask, 33543eaea13SPaulo Zanoni uint32_t enabled_irq_mask) 33643eaea13SPaulo Zanoni { 33767520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 33843eaea13SPaulo Zanoni 33915a17aaeSDaniel Vetter WARN_ON(enabled_irq_mask & ~interrupt_mask); 34015a17aaeSDaniel Vetter 3419df7575fSJesse Barnes if (WARN_ON(!intel_irqs_enabled(dev_priv))) 342c67a470bSPaulo Zanoni return; 343c67a470bSPaulo Zanoni 34443eaea13SPaulo Zanoni dev_priv->gt_irq_mask &= ~interrupt_mask; 34543eaea13SPaulo Zanoni dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 34643eaea13SPaulo Zanoni I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 34743eaea13SPaulo Zanoni } 34843eaea13SPaulo Zanoni 349480c8033SDaniel Vetter void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 35043eaea13SPaulo Zanoni { 35143eaea13SPaulo Zanoni ilk_update_gt_irq(dev_priv, mask, mask); 35231bb59ccSChris Wilson POSTING_READ_FW(GTIMR); 35343eaea13SPaulo Zanoni } 35443eaea13SPaulo Zanoni 355480c8033SDaniel Vetter void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 35643eaea13SPaulo Zanoni { 35743eaea13SPaulo Zanoni ilk_update_gt_irq(dev_priv, mask, 0); 35843eaea13SPaulo Zanoni } 35943eaea13SPaulo Zanoni 360f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 361b900b949SImre Deak { 
362d02b98b8SOscar Mateo WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); 363d02b98b8SOscar Mateo 364bca2bf2aSPandiyan, Dhinakaran return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 365b900b949SImre Deak } 366b900b949SImre Deak 367f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 368a72fbc3aSImre Deak { 369d02b98b8SOscar Mateo if (INTEL_GEN(dev_priv) >= 11) 370d02b98b8SOscar Mateo return GEN11_GPM_WGBOXPERF_INTR_MASK; 371d02b98b8SOscar Mateo else if (INTEL_GEN(dev_priv) >= 8) 372d02b98b8SOscar Mateo return GEN8_GT_IMR(2); 373d02b98b8SOscar Mateo else 374d02b98b8SOscar Mateo return GEN6_PMIMR; 375a72fbc3aSImre Deak } 376a72fbc3aSImre Deak 377f0f59a00SVille Syrjälä static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 378b900b949SImre Deak { 379d02b98b8SOscar Mateo if (INTEL_GEN(dev_priv) >= 11) 380d02b98b8SOscar Mateo return GEN11_GPM_WGBOXPERF_INTR_ENABLE; 381d02b98b8SOscar Mateo else if (INTEL_GEN(dev_priv) >= 8) 382d02b98b8SOscar Mateo return GEN8_GT_IER(2); 383d02b98b8SOscar Mateo else 384d02b98b8SOscar Mateo return GEN6_PMIER; 385b900b949SImre Deak } 386b900b949SImre Deak 387edbfdb45SPaulo Zanoni /** 388edbfdb45SPaulo Zanoni * snb_update_pm_irq - update GEN6_PMIMR 389edbfdb45SPaulo Zanoni * @dev_priv: driver private 390edbfdb45SPaulo Zanoni * @interrupt_mask: mask of interrupt bits to update 391edbfdb45SPaulo Zanoni * @enabled_irq_mask: mask of interrupt bits to enable 392edbfdb45SPaulo Zanoni */ 393edbfdb45SPaulo Zanoni static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 394edbfdb45SPaulo Zanoni uint32_t interrupt_mask, 395edbfdb45SPaulo Zanoni uint32_t enabled_irq_mask) 396edbfdb45SPaulo Zanoni { 397605cd25bSPaulo Zanoni uint32_t new_val; 398edbfdb45SPaulo Zanoni 39915a17aaeSDaniel Vetter WARN_ON(enabled_irq_mask & ~interrupt_mask); 40015a17aaeSDaniel Vetter 40167520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 402edbfdb45SPaulo Zanoni 403f4e9af4fSAkash Goel new_val = dev_priv->pm_imr; 404f52ecbcfSPaulo Zanoni new_val &= ~interrupt_mask; 405f52ecbcfSPaulo Zanoni new_val |= (~enabled_irq_mask & interrupt_mask); 406f52ecbcfSPaulo Zanoni 407f4e9af4fSAkash Goel if (new_val != dev_priv->pm_imr) { 408f4e9af4fSAkash Goel dev_priv->pm_imr = new_val; 409f4e9af4fSAkash Goel I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr); 410a72fbc3aSImre Deak POSTING_READ(gen6_pm_imr(dev_priv)); 411edbfdb45SPaulo Zanoni } 412f52ecbcfSPaulo Zanoni } 413edbfdb45SPaulo Zanoni 414f4e9af4fSAkash Goel void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 415edbfdb45SPaulo Zanoni { 4169939fba2SImre Deak if (WARN_ON(!intel_irqs_enabled(dev_priv))) 4179939fba2SImre Deak return; 4189939fba2SImre Deak 419edbfdb45SPaulo Zanoni snb_update_pm_irq(dev_priv, mask, mask); 420edbfdb45SPaulo Zanoni } 421edbfdb45SPaulo Zanoni 422f4e9af4fSAkash Goel static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 4239939fba2SImre Deak { 4249939fba2SImre Deak snb_update_pm_irq(dev_priv, mask, 0); 4259939fba2SImre Deak } 4269939fba2SImre Deak 427f4e9af4fSAkash Goel void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) 428edbfdb45SPaulo Zanoni { 4299939fba2SImre Deak if (WARN_ON(!intel_irqs_enabled(dev_priv))) 4309939fba2SImre Deak return; 4319939fba2SImre Deak 432f4e9af4fSAkash Goel __gen6_mask_pm_irq(dev_priv, mask); 433f4e9af4fSAkash Goel } 434f4e9af4fSAkash Goel 4353814fd77SOscar Mateo static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) 436f4e9af4fSAkash Goel { 437f4e9af4fSAkash 
        i915_reg_t reg = gen6_pm_iir(dev_priv);

        lockdep_assert_held(&dev_priv->irq_lock);

        /* IIR can theoretically queue up two events, hence the double write */
        I915_WRITE(reg, reset_mask);
        I915_WRITE(reg, reset_mask);
        POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier |= enable_mask;
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        gen6_unmask_pm_irq(dev_priv, enable_mask);
        /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
        lockdep_assert_held(&dev_priv->irq_lock);

        dev_priv->pm_ier &= ~disable_mask;
        __gen6_mask_pm_irq(dev_priv, disable_mask);
        I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
        /* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);

        while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
                ;

        dev_priv->gt_pm.rps.pm_iir = 0;

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        gen6_reset_pm_iir(dev_priv, GEN6_PM_RPS_EVENTS);
        dev_priv->gt_pm.rps.pm_iir = 0;
        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
        struct intel_rps *rps = &dev_priv->gt_pm.rps;

        if (READ_ONCE(rps->interrupts_enabled))
                return;

        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON_ONCE(rps->pm_iir);

        if (INTEL_GEN(dev_priv) >= 11)
                WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
        else
                WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

        rps->interrupts_enabled = true;
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

        spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
508b900b949SImre Deak { 509562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 510562d9baeSSagar Arun Kamble 511562d9baeSSagar Arun Kamble if (!READ_ONCE(rps->interrupts_enabled)) 512f2a91d1aSChris Wilson return; 513f2a91d1aSChris Wilson 514d4d70aa5SImre Deak spin_lock_irq(&dev_priv->irq_lock); 515562d9baeSSagar Arun Kamble rps->interrupts_enabled = false; 5169939fba2SImre Deak 517b20e3cfeSDave Gordon I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); 5189939fba2SImre Deak 5194668f695SChris Wilson gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 52058072ccbSImre Deak 52158072ccbSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 52291c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 523c33d247dSChris Wilson 524c33d247dSChris Wilson /* Now that we will not be generating any more work, flush any 5253814fd77SOscar Mateo * outstanding tasks. As we are called on the RPS idle path, 526c33d247dSChris Wilson * we will reset the GPU to minimum frequencies, so the current 527c33d247dSChris Wilson * state of the worker can be discarded. 528c33d247dSChris Wilson */ 529562d9baeSSagar Arun Kamble cancel_work_sync(&rps->work); 530d02b98b8SOscar Mateo if (INTEL_GEN(dev_priv) >= 11) 531d02b98b8SOscar Mateo gen11_reset_rps_interrupts(dev_priv); 532d02b98b8SOscar Mateo else 533c33d247dSChris Wilson gen6_reset_rps_interrupts(dev_priv); 534b900b949SImre Deak } 535b900b949SImre Deak 53626705e20SSagar Arun Kamble void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 53726705e20SSagar Arun Kamble { 5381be333d3SSagar Arun Kamble assert_rpm_wakelock_held(dev_priv); 5391be333d3SSagar Arun Kamble 54026705e20SSagar Arun Kamble spin_lock_irq(&dev_priv->irq_lock); 54126705e20SSagar Arun Kamble gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 54226705e20SSagar Arun Kamble spin_unlock_irq(&dev_priv->irq_lock); 54326705e20SSagar Arun Kamble } 54426705e20SSagar Arun Kamble 54526705e20SSagar Arun Kamble void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 54626705e20SSagar Arun Kamble { 5471be333d3SSagar Arun Kamble assert_rpm_wakelock_held(dev_priv); 5481be333d3SSagar Arun Kamble 54926705e20SSagar Arun Kamble spin_lock_irq(&dev_priv->irq_lock); 55026705e20SSagar Arun Kamble if (!dev_priv->guc.interrupts_enabled) { 55126705e20SSagar Arun Kamble WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 55226705e20SSagar Arun Kamble dev_priv->pm_guc_events); 55326705e20SSagar Arun Kamble dev_priv->guc.interrupts_enabled = true; 55426705e20SSagar Arun Kamble gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 55526705e20SSagar Arun Kamble } 55626705e20SSagar Arun Kamble spin_unlock_irq(&dev_priv->irq_lock); 55726705e20SSagar Arun Kamble } 55826705e20SSagar Arun Kamble 55926705e20SSagar Arun Kamble void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 56026705e20SSagar Arun Kamble { 5611be333d3SSagar Arun Kamble assert_rpm_wakelock_held(dev_priv); 5621be333d3SSagar Arun Kamble 56326705e20SSagar Arun Kamble spin_lock_irq(&dev_priv->irq_lock); 56426705e20SSagar Arun Kamble dev_priv->guc.interrupts_enabled = false; 56526705e20SSagar Arun Kamble 56626705e20SSagar Arun Kamble gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 56726705e20SSagar Arun Kamble 56826705e20SSagar Arun Kamble spin_unlock_irq(&dev_priv->irq_lock); 56926705e20SSagar Arun Kamble synchronize_irq(dev_priv->drm.irq); 57026705e20SSagar Arun Kamble 57126705e20SSagar Arun Kamble gen9_reset_guc_interrupts(dev_priv); 57226705e20SSagar Arun Kamble } 57326705e20SSagar Arun Kamble 
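/*
 * A minimal usage sketch for the GuC interrupt helpers above, assuming a
 * caller that already holds a runtime-PM wakeref (as the asserts require):
 *
 *        gen9_enable_guc_interrupts(dev_priv);
 *        ... GuC-to-host events now arrive via dev_priv->pm_guc_events ...
 *        gen9_disable_guc_interrupts(dev_priv);
 *
 * Enable/disable are meant to be paired: disabling masks the events, waits
 * for any in-flight handler via synchronize_irq() and then clears stale IIR
 * bits with gen9_reset_guc_interrupts().
 */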
5740961021aSBen Widawsky /** 5753a3b3c7dSVille Syrjälä * bdw_update_port_irq - update DE port interrupt 5763a3b3c7dSVille Syrjälä * @dev_priv: driver private 5773a3b3c7dSVille Syrjälä * @interrupt_mask: mask of interrupt bits to update 5783a3b3c7dSVille Syrjälä * @enabled_irq_mask: mask of interrupt bits to enable 5793a3b3c7dSVille Syrjälä */ 5803a3b3c7dSVille Syrjälä static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 5813a3b3c7dSVille Syrjälä uint32_t interrupt_mask, 5823a3b3c7dSVille Syrjälä uint32_t enabled_irq_mask) 5833a3b3c7dSVille Syrjälä { 5843a3b3c7dSVille Syrjälä uint32_t new_val; 5853a3b3c7dSVille Syrjälä uint32_t old_val; 5863a3b3c7dSVille Syrjälä 58767520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 5883a3b3c7dSVille Syrjälä 5893a3b3c7dSVille Syrjälä WARN_ON(enabled_irq_mask & ~interrupt_mask); 5903a3b3c7dSVille Syrjälä 5913a3b3c7dSVille Syrjälä if (WARN_ON(!intel_irqs_enabled(dev_priv))) 5923a3b3c7dSVille Syrjälä return; 5933a3b3c7dSVille Syrjälä 5943a3b3c7dSVille Syrjälä old_val = I915_READ(GEN8_DE_PORT_IMR); 5953a3b3c7dSVille Syrjälä 5963a3b3c7dSVille Syrjälä new_val = old_val; 5973a3b3c7dSVille Syrjälä new_val &= ~interrupt_mask; 5983a3b3c7dSVille Syrjälä new_val |= (~enabled_irq_mask & interrupt_mask); 5993a3b3c7dSVille Syrjälä 6003a3b3c7dSVille Syrjälä if (new_val != old_val) { 6013a3b3c7dSVille Syrjälä I915_WRITE(GEN8_DE_PORT_IMR, new_val); 6023a3b3c7dSVille Syrjälä POSTING_READ(GEN8_DE_PORT_IMR); 6033a3b3c7dSVille Syrjälä } 6043a3b3c7dSVille Syrjälä } 6053a3b3c7dSVille Syrjälä 6063a3b3c7dSVille Syrjälä /** 607013d3752SVille Syrjälä * bdw_update_pipe_irq - update DE pipe interrupt 608013d3752SVille Syrjälä * @dev_priv: driver private 609013d3752SVille Syrjälä * @pipe: pipe whose interrupt to update 610013d3752SVille Syrjälä * @interrupt_mask: mask of interrupt bits to update 611013d3752SVille Syrjälä * @enabled_irq_mask: mask of interrupt bits to enable 612013d3752SVille Syrjälä */ 613013d3752SVille Syrjälä void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 614013d3752SVille Syrjälä enum pipe pipe, 615013d3752SVille Syrjälä uint32_t interrupt_mask, 616013d3752SVille Syrjälä uint32_t enabled_irq_mask) 617013d3752SVille Syrjälä { 618013d3752SVille Syrjälä uint32_t new_val; 619013d3752SVille Syrjälä 62067520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 621013d3752SVille Syrjälä 622013d3752SVille Syrjälä WARN_ON(enabled_irq_mask & ~interrupt_mask); 623013d3752SVille Syrjälä 624013d3752SVille Syrjälä if (WARN_ON(!intel_irqs_enabled(dev_priv))) 625013d3752SVille Syrjälä return; 626013d3752SVille Syrjälä 627013d3752SVille Syrjälä new_val = dev_priv->de_irq_mask[pipe]; 628013d3752SVille Syrjälä new_val &= ~interrupt_mask; 629013d3752SVille Syrjälä new_val |= (~enabled_irq_mask & interrupt_mask); 630013d3752SVille Syrjälä 631013d3752SVille Syrjälä if (new_val != dev_priv->de_irq_mask[pipe]) { 632013d3752SVille Syrjälä dev_priv->de_irq_mask[pipe] = new_val; 633013d3752SVille Syrjälä I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 634013d3752SVille Syrjälä POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 635013d3752SVille Syrjälä } 636013d3752SVille Syrjälä } 637013d3752SVille Syrjälä 638013d3752SVille Syrjälä /** 639fee884edSDaniel Vetter * ibx_display_interrupt_update - update SDEIMR 640fee884edSDaniel Vetter * @dev_priv: driver private 641fee884edSDaniel Vetter * @interrupt_mask: mask of interrupt bits to update 642fee884edSDaniel Vetter * @enabled_irq_mask: mask of interrupt bits to enable 643fee884edSDaniel Vetter */ 
64447339cd9SDaniel Vetter void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 645fee884edSDaniel Vetter uint32_t interrupt_mask, 646fee884edSDaniel Vetter uint32_t enabled_irq_mask) 647fee884edSDaniel Vetter { 648fee884edSDaniel Vetter uint32_t sdeimr = I915_READ(SDEIMR); 649fee884edSDaniel Vetter sdeimr &= ~interrupt_mask; 650fee884edSDaniel Vetter sdeimr |= (~enabled_irq_mask & interrupt_mask); 651fee884edSDaniel Vetter 65215a17aaeSDaniel Vetter WARN_ON(enabled_irq_mask & ~interrupt_mask); 65315a17aaeSDaniel Vetter 65467520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 655fee884edSDaniel Vetter 6569df7575fSJesse Barnes if (WARN_ON(!intel_irqs_enabled(dev_priv))) 657c67a470bSPaulo Zanoni return; 658c67a470bSPaulo Zanoni 659fee884edSDaniel Vetter I915_WRITE(SDEIMR, sdeimr); 660fee884edSDaniel Vetter POSTING_READ(SDEIMR); 661fee884edSDaniel Vetter } 6628664281bSPaulo Zanoni 6636b12ca56SVille Syrjälä u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 6646b12ca56SVille Syrjälä enum pipe pipe) 6657c463586SKeith Packard { 6666b12ca56SVille Syrjälä u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 66710c59c51SImre Deak u32 enable_mask = status_mask << 16; 66810c59c51SImre Deak 6696b12ca56SVille Syrjälä lockdep_assert_held(&dev_priv->irq_lock); 6706b12ca56SVille Syrjälä 6716b12ca56SVille Syrjälä if (INTEL_GEN(dev_priv) < 5) 6726b12ca56SVille Syrjälä goto out; 6736b12ca56SVille Syrjälä 67410c59c51SImre Deak /* 675724a6905SVille Syrjälä * On pipe A we don't support the PSR interrupt yet, 676724a6905SVille Syrjälä * on pipe B and C the same bit MBZ. 67710c59c51SImre Deak */ 67810c59c51SImre Deak if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 67910c59c51SImre Deak return 0; 680724a6905SVille Syrjälä /* 681724a6905SVille Syrjälä * On pipe B and C we don't support the PSR interrupt yet, on pipe 682724a6905SVille Syrjälä * A the same bit is for perf counters which we don't use either. 
683724a6905SVille Syrjälä */ 684724a6905SVille Syrjälä if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 685724a6905SVille Syrjälä return 0; 68610c59c51SImre Deak 68710c59c51SImre Deak enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 68810c59c51SImre Deak SPRITE0_FLIP_DONE_INT_EN_VLV | 68910c59c51SImre Deak SPRITE1_FLIP_DONE_INT_EN_VLV); 69010c59c51SImre Deak if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 69110c59c51SImre Deak enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 69210c59c51SImre Deak if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 69310c59c51SImre Deak enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 69410c59c51SImre Deak 6956b12ca56SVille Syrjälä out: 6966b12ca56SVille Syrjälä WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 6976b12ca56SVille Syrjälä status_mask & ~PIPESTAT_INT_STATUS_MASK, 6986b12ca56SVille Syrjälä "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 6996b12ca56SVille Syrjälä pipe_name(pipe), enable_mask, status_mask); 7006b12ca56SVille Syrjälä 70110c59c51SImre Deak return enable_mask; 70210c59c51SImre Deak } 70310c59c51SImre Deak 7046b12ca56SVille Syrjälä void i915_enable_pipestat(struct drm_i915_private *dev_priv, 7056b12ca56SVille Syrjälä enum pipe pipe, u32 status_mask) 706755e9019SImre Deak { 7076b12ca56SVille Syrjälä i915_reg_t reg = PIPESTAT(pipe); 708755e9019SImre Deak u32 enable_mask; 709755e9019SImre Deak 7106b12ca56SVille Syrjälä WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 7116b12ca56SVille Syrjälä "pipe %c: status_mask=0x%x\n", 7126b12ca56SVille Syrjälä pipe_name(pipe), status_mask); 7136b12ca56SVille Syrjälä 7146b12ca56SVille Syrjälä lockdep_assert_held(&dev_priv->irq_lock); 7156b12ca56SVille Syrjälä WARN_ON(!intel_irqs_enabled(dev_priv)); 7166b12ca56SVille Syrjälä 7176b12ca56SVille Syrjälä if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 7186b12ca56SVille Syrjälä return; 7196b12ca56SVille Syrjälä 7206b12ca56SVille Syrjälä dev_priv->pipestat_irq_mask[pipe] |= status_mask; 7216b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 7226b12ca56SVille Syrjälä 7236b12ca56SVille Syrjälä I915_WRITE(reg, enable_mask | status_mask); 7246b12ca56SVille Syrjälä POSTING_READ(reg); 725755e9019SImre Deak } 726755e9019SImre Deak 7276b12ca56SVille Syrjälä void i915_disable_pipestat(struct drm_i915_private *dev_priv, 7286b12ca56SVille Syrjälä enum pipe pipe, u32 status_mask) 729755e9019SImre Deak { 7306b12ca56SVille Syrjälä i915_reg_t reg = PIPESTAT(pipe); 731755e9019SImre Deak u32 enable_mask; 732755e9019SImre Deak 7336b12ca56SVille Syrjälä WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 7346b12ca56SVille Syrjälä "pipe %c: status_mask=0x%x\n", 7356b12ca56SVille Syrjälä pipe_name(pipe), status_mask); 7366b12ca56SVille Syrjälä 7376b12ca56SVille Syrjälä lockdep_assert_held(&dev_priv->irq_lock); 7386b12ca56SVille Syrjälä WARN_ON(!intel_irqs_enabled(dev_priv)); 7396b12ca56SVille Syrjälä 7406b12ca56SVille Syrjälä if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 7416b12ca56SVille Syrjälä return; 7426b12ca56SVille Syrjälä 7436b12ca56SVille Syrjälä dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 7446b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 7456b12ca56SVille Syrjälä 7466b12ca56SVille Syrjälä I915_WRITE(reg, enable_mask | status_mask); 7476b12ca56SVille Syrjälä POSTING_READ(reg); 748755e9019SImre Deak } 749755e9019SImre Deak 750c0e09200SDave Airlie /** 751f49e38ddSJani Nikula * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 75214bb2c11STvrtko Ursulin 
* @dev_priv: i915 device private 75301c66889SZhao Yakui */ 75491d14251STvrtko Ursulin static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 75501c66889SZhao Yakui { 75691d14251STvrtko Ursulin if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 757f49e38ddSJani Nikula return; 758f49e38ddSJani Nikula 75913321786SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 76001c66889SZhao Yakui 761755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 76291d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 4) 7633b6c42e8SDaniel Vetter i915_enable_pipestat(dev_priv, PIPE_A, 764755e9019SImre Deak PIPE_LEGACY_BLC_EVENT_STATUS); 7651ec14ad3SChris Wilson 76613321786SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 76701c66889SZhao Yakui } 76801c66889SZhao Yakui 769f75f3746SVille Syrjälä /* 770f75f3746SVille Syrjälä * This timing diagram depicts the video signal in and 771f75f3746SVille Syrjälä * around the vertical blanking period. 772f75f3746SVille Syrjälä * 773f75f3746SVille Syrjälä * Assumptions about the fictitious mode used in this example: 774f75f3746SVille Syrjälä * vblank_start >= 3 775f75f3746SVille Syrjälä * vsync_start = vblank_start + 1 776f75f3746SVille Syrjälä * vsync_end = vblank_start + 2 777f75f3746SVille Syrjälä * vtotal = vblank_start + 3 778f75f3746SVille Syrjälä * 779f75f3746SVille Syrjälä * start of vblank: 780f75f3746SVille Syrjälä * latch double buffered registers 781f75f3746SVille Syrjälä * increment frame counter (ctg+) 782f75f3746SVille Syrjälä * generate start of vblank interrupt (gen4+) 783f75f3746SVille Syrjälä * | 784f75f3746SVille Syrjälä * | frame start: 785f75f3746SVille Syrjälä * | generate frame start interrupt (aka. vblank interrupt) (gmch) 786f75f3746SVille Syrjälä * | may be shifted forward 1-3 extra lines via PIPECONF 787f75f3746SVille Syrjälä * | | 788f75f3746SVille Syrjälä * | | start of vsync: 789f75f3746SVille Syrjälä * | | generate vsync interrupt 790f75f3746SVille Syrjälä * | | | 791f75f3746SVille Syrjälä * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 792f75f3746SVille Syrjälä * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 793f75f3746SVille Syrjälä * ----va---> <-----------------vb--------------------> <--------va------------- 794f75f3746SVille Syrjälä * | | <----vs-----> | 795f75f3746SVille Syrjälä * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 796f75f3746SVille Syrjälä * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 797f75f3746SVille Syrjälä * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 798f75f3746SVille Syrjälä * | | | 799f75f3746SVille Syrjälä * last visible pixel first visible pixel 800f75f3746SVille Syrjälä * | increment frame counter (gen3/4) 801f75f3746SVille Syrjälä * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 802f75f3746SVille Syrjälä * 803f75f3746SVille Syrjälä * x = horizontal active 804f75f3746SVille Syrjälä * _ = horizontal blanking 805f75f3746SVille Syrjälä * hs = horizontal sync 806f75f3746SVille Syrjälä * va = vertical active 807f75f3746SVille Syrjälä * vb = vertical blanking 808f75f3746SVille Syrjälä * vs = vertical sync 809f75f3746SVille Syrjälä * vbs = vblank_start (number) 810f75f3746SVille Syrjälä * 811f75f3746SVille Syrjälä * Summary: 812f75f3746SVille Syrjälä * - most events happen at the start of horizontal sync 813f75f3746SVille Syrjälä * - frame start happens at the start of horizontal blank, 1-4 lines 814f75f3746SVille Syrjälä * (depending on PIPECONF settings) after the start of vblank 815f75f3746SVille Syrjälä * - gen3/4 pixel and frame counter are synchronized with the start 816f75f3746SVille Syrjälä * of horizontal active on the first line of vertical active 817f75f3746SVille Syrjälä */ 818f75f3746SVille Syrjälä 81942f52ef8SKeith Packard /* Called from drm generic code, passed a 'crtc', which 82042f52ef8SKeith Packard * we use as a pipe index 82142f52ef8SKeith Packard */ 82288e72717SThierry Reding static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 8230a3e67a4SJesse Barnes { 824fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 825f0f59a00SVille Syrjälä i915_reg_t high_frame, low_frame; 8260b2a8e09SVille Syrjälä u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 8275caa0feaSDaniel Vetter const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 828694e409dSVille Syrjälä unsigned long irqflags; 829391f75e2SVille Syrjälä 8300b2a8e09SVille Syrjälä htotal = mode->crtc_htotal; 8310b2a8e09SVille Syrjälä hsync_start = mode->crtc_hsync_start; 8320b2a8e09SVille Syrjälä vbl_start = mode->crtc_vblank_start; 8330b2a8e09SVille Syrjälä if (mode->flags & DRM_MODE_FLAG_INTERLACE) 8340b2a8e09SVille Syrjälä vbl_start = DIV_ROUND_UP(vbl_start, 2); 835391f75e2SVille Syrjälä 8360b2a8e09SVille Syrjälä /* Convert to pixel count */ 8370b2a8e09SVille Syrjälä vbl_start *= htotal; 8380b2a8e09SVille Syrjälä 8390b2a8e09SVille Syrjälä /* Start of vblank event occurs at start of hsync */ 8400b2a8e09SVille Syrjälä vbl_start -= htotal - hsync_start; 8410b2a8e09SVille Syrjälä 8429db4a9c7SJesse Barnes high_frame = PIPEFRAME(pipe); 8439db4a9c7SJesse Barnes low_frame = PIPEFRAMEPIXEL(pipe); 8445eddb70bSChris Wilson 845694e409dSVille Syrjälä spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 846694e409dSVille Syrjälä 8470a3e67a4SJesse Barnes /* 8480a3e67a4SJesse Barnes * High & low register fields aren't synchronized, so make sure 8490a3e67a4SJesse Barnes * we get a low value that's stable across two reads of the high 8500a3e67a4SJesse Barnes * 
register. 8510a3e67a4SJesse Barnes */ 8520a3e67a4SJesse Barnes do { 853694e409dSVille Syrjälä high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 854694e409dSVille Syrjälä low = I915_READ_FW(low_frame); 855694e409dSVille Syrjälä high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK; 8560a3e67a4SJesse Barnes } while (high1 != high2); 8570a3e67a4SJesse Barnes 858694e409dSVille Syrjälä spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 859694e409dSVille Syrjälä 8605eddb70bSChris Wilson high1 >>= PIPE_FRAME_HIGH_SHIFT; 861391f75e2SVille Syrjälä pixel = low & PIPE_PIXEL_MASK; 8625eddb70bSChris Wilson low >>= PIPE_FRAME_LOW_SHIFT; 863391f75e2SVille Syrjälä 864391f75e2SVille Syrjälä /* 865391f75e2SVille Syrjälä * The frame counter increments at beginning of active. 866391f75e2SVille Syrjälä * Cook up a vblank counter by also checking the pixel 867391f75e2SVille Syrjälä * counter against vblank start. 868391f75e2SVille Syrjälä */ 869edc08d0aSVille Syrjälä return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 8700a3e67a4SJesse Barnes } 8710a3e67a4SJesse Barnes 872974e59baSDave Airlie static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 8739880b7a5SJesse Barnes { 874fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 8759880b7a5SJesse Barnes 876649636efSVille Syrjälä return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 8779880b7a5SJesse Barnes } 8789880b7a5SJesse Barnes 879aec0246fSUma Shankar /* 880aec0246fSUma Shankar * On certain encoders on certain platforms, pipe 881aec0246fSUma Shankar * scanline register will not work to get the scanline, 882aec0246fSUma Shankar * since the timings are driven from the PORT or issues 883aec0246fSUma Shankar * with scanline register updates. 884aec0246fSUma Shankar * This function will use Framestamp and current 885aec0246fSUma Shankar * timestamp registers to calculate the scanline. 886aec0246fSUma Shankar */ 887aec0246fSUma Shankar static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) 888aec0246fSUma Shankar { 889aec0246fSUma Shankar struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 890aec0246fSUma Shankar struct drm_vblank_crtc *vblank = 891aec0246fSUma Shankar &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; 892aec0246fSUma Shankar const struct drm_display_mode *mode = &vblank->hwmode; 893aec0246fSUma Shankar u32 vblank_start = mode->crtc_vblank_start; 894aec0246fSUma Shankar u32 vtotal = mode->crtc_vtotal; 895aec0246fSUma Shankar u32 htotal = mode->crtc_htotal; 896aec0246fSUma Shankar u32 clock = mode->crtc_clock; 897aec0246fSUma Shankar u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; 898aec0246fSUma Shankar 899aec0246fSUma Shankar /* 900aec0246fSUma Shankar * To avoid the race condition where we might cross into the 901aec0246fSUma Shankar * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR 902aec0246fSUma Shankar * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR 903aec0246fSUma Shankar * during the same frame. 904aec0246fSUma Shankar */ 905aec0246fSUma Shankar do { 906aec0246fSUma Shankar /* 907aec0246fSUma Shankar * This field provides read back of the display 908aec0246fSUma Shankar * pipe frame time stamp. The time stamp value 909aec0246fSUma Shankar * is sampled at every start of vertical blank. 
                 */
                scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

                /*
                 * The TIMESTAMP_CTR register has the current
                 * time stamp value.
                 */
                scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

                scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
        } while (scan_post_time != scan_prev_time);

        scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
                                       clock), 1000 * htotal);
        scanline = min(scanline, vtotal - 1);
        scanline = (scanline + vblank_start) % vtotal;

        return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        const struct drm_display_mode *mode;
        struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;

        if (!crtc->active)
                return -1;

        vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
        mode = &vblank->hwmode;

        if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
                return __intel_get_crtc_scanline_from_timestamp(crtc);

        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;

        if (IS_GEN(dev_priv, 2))
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
        else
                position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

        /*
         * On HSW, the DSL reg (0x70000) appears to return 0 if we
         * read it just before the start of vblank. So try it again
         * so we don't accidentally end up spanning a vblank frame
         * increment, causing the pipe_update_end() code to squawk at us.
         *
         * The nature of this problem means we can't simply check the ISR
         * bit and return the vblank start value; nor can we use the scanline
         * debug register in the transcoder as it appears to have the same
         * problem.  We may need to extend this to include other platforms,
         * but so far testing only shows the problem on HSW.
96941b578fbSJesse Barnes */ 97091d14251STvrtko Ursulin if (HAS_DDI(dev_priv) && !position) { 97141b578fbSJesse Barnes int i, temp; 97241b578fbSJesse Barnes 97341b578fbSJesse Barnes for (i = 0; i < 100; i++) { 97441b578fbSJesse Barnes udelay(1); 975707bdd3fSVille Syrjälä temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 97641b578fbSJesse Barnes if (temp != position) { 97741b578fbSJesse Barnes position = temp; 97841b578fbSJesse Barnes break; 97941b578fbSJesse Barnes } 98041b578fbSJesse Barnes } 98141b578fbSJesse Barnes } 98241b578fbSJesse Barnes 98341b578fbSJesse Barnes /* 98480715b2fSVille Syrjälä * See update_scanline_offset() for the details on the 98580715b2fSVille Syrjälä * scanline_offset adjustment. 986a225f079SVille Syrjälä */ 98780715b2fSVille Syrjälä return (position + crtc->scanline_offset) % vtotal; 988a225f079SVille Syrjälä } 989a225f079SVille Syrjälä 9901bf6ad62SDaniel Vetter static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 9911bf6ad62SDaniel Vetter bool in_vblank_irq, int *vpos, int *hpos, 9923bb403bfSVille Syrjälä ktime_t *stime, ktime_t *etime, 9933bb403bfSVille Syrjälä const struct drm_display_mode *mode) 9940af7e4dfSMario Kleiner { 995fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 99698187836SVille Syrjälä struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 99798187836SVille Syrjälä pipe); 9983aa18df8SVille Syrjälä int position; 99978e8fc6bSVille Syrjälä int vbl_start, vbl_end, hsync_start, htotal, vtotal; 1000ad3543edSMario Kleiner unsigned long irqflags; 10010af7e4dfSMario Kleiner 1002fc467a22SMaarten Lankhorst if (WARN_ON(!mode->crtc_clock)) { 10030af7e4dfSMario Kleiner DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 10049db4a9c7SJesse Barnes "pipe %c\n", pipe_name(pipe)); 10051bf6ad62SDaniel Vetter return false; 10060af7e4dfSMario Kleiner } 10070af7e4dfSMario Kleiner 1008c2baf4b7SVille Syrjälä htotal = mode->crtc_htotal; 100978e8fc6bSVille Syrjälä hsync_start = mode->crtc_hsync_start; 1010c2baf4b7SVille Syrjälä vtotal = mode->crtc_vtotal; 1011c2baf4b7SVille Syrjälä vbl_start = mode->crtc_vblank_start; 1012c2baf4b7SVille Syrjälä vbl_end = mode->crtc_vblank_end; 10130af7e4dfSMario Kleiner 1014d31faf65SVille Syrjälä if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 1015d31faf65SVille Syrjälä vbl_start = DIV_ROUND_UP(vbl_start, 2); 1016d31faf65SVille Syrjälä vbl_end /= 2; 1017d31faf65SVille Syrjälä vtotal /= 2; 1018d31faf65SVille Syrjälä } 1019d31faf65SVille Syrjälä 1020ad3543edSMario Kleiner /* 1021ad3543edSMario Kleiner * Lock uncore.lock, as we will do multiple timing critical raw 1022ad3543edSMario Kleiner * register reads, potentially with preemption disabled, so the 1023ad3543edSMario Kleiner * following code must not block on uncore.lock. 1024ad3543edSMario Kleiner */ 1025ad3543edSMario Kleiner spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1026ad3543edSMario Kleiner 1027ad3543edSMario Kleiner /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 1028ad3543edSMario Kleiner 1029ad3543edSMario Kleiner /* Get optional system timestamp before query. */ 1030ad3543edSMario Kleiner if (stime) 1031ad3543edSMario Kleiner *stime = ktime_get(); 1032ad3543edSMario Kleiner 1033*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 10340af7e4dfSMario Kleiner /* No obvious pixelcount register. Only query vertical 10350af7e4dfSMario Kleiner * scanout position from Display scan line register. 
10360af7e4dfSMario Kleiner */ 1037a225f079SVille Syrjälä position = __intel_get_crtc_scanline(intel_crtc); 10380af7e4dfSMario Kleiner } else { 10390af7e4dfSMario Kleiner /* Have access to pixelcount since start of frame. 10400af7e4dfSMario Kleiner * We can split this into vertical and horizontal 10410af7e4dfSMario Kleiner * scanout position. 10420af7e4dfSMario Kleiner */ 104375aa3f63SVille Syrjälä position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 10440af7e4dfSMario Kleiner 10453aa18df8SVille Syrjälä /* convert to pixel counts */ 10463aa18df8SVille Syrjälä vbl_start *= htotal; 10473aa18df8SVille Syrjälä vbl_end *= htotal; 10483aa18df8SVille Syrjälä vtotal *= htotal; 104978e8fc6bSVille Syrjälä 105078e8fc6bSVille Syrjälä /* 10517e78f1cbSVille Syrjälä * In interlaced modes, the pixel counter counts all pixels, 10527e78f1cbSVille Syrjälä * so one field will have htotal more pixels. In order to avoid 10537e78f1cbSVille Syrjälä * the reported position from jumping backwards when the pixel 10547e78f1cbSVille Syrjälä * counter is beyond the length of the shorter field, just 10557e78f1cbSVille Syrjälä * clamp the position the length of the shorter field. This 10567e78f1cbSVille Syrjälä * matches how the scanline counter based position works since 10577e78f1cbSVille Syrjälä * the scanline counter doesn't count the two half lines. 10587e78f1cbSVille Syrjälä */ 10597e78f1cbSVille Syrjälä if (position >= vtotal) 10607e78f1cbSVille Syrjälä position = vtotal - 1; 10617e78f1cbSVille Syrjälä 10627e78f1cbSVille Syrjälä /* 106378e8fc6bSVille Syrjälä * Start of vblank interrupt is triggered at start of hsync, 106478e8fc6bSVille Syrjälä * just prior to the first active line of vblank. However we 106578e8fc6bSVille Syrjälä * consider lines to start at the leading edge of horizontal 106678e8fc6bSVille Syrjälä * active. So, should we get here before we've crossed into 106778e8fc6bSVille Syrjälä * the horizontal active of the first line in vblank, we would 106878e8fc6bSVille Syrjälä * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 106978e8fc6bSVille Syrjälä * always add htotal-hsync_start to the current pixel position. 107078e8fc6bSVille Syrjälä */ 107178e8fc6bSVille Syrjälä position = (position + htotal - hsync_start) % vtotal; 10723aa18df8SVille Syrjälä } 10733aa18df8SVille Syrjälä 1074ad3543edSMario Kleiner /* Get optional system timestamp after query. */ 1075ad3543edSMario Kleiner if (etime) 1076ad3543edSMario Kleiner *etime = ktime_get(); 1077ad3543edSMario Kleiner 1078ad3543edSMario Kleiner /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 1079ad3543edSMario Kleiner 1080ad3543edSMario Kleiner spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1081ad3543edSMario Kleiner 10823aa18df8SVille Syrjälä /* 10833aa18df8SVille Syrjälä * While in vblank, position will be negative 10843aa18df8SVille Syrjälä * counting up towards 0 at vbl_end. And outside 10853aa18df8SVille Syrjälä * vblank, position will be positive counting 10863aa18df8SVille Syrjälä * up since vbl_end. 
10873aa18df8SVille Syrjälä */ 10883aa18df8SVille Syrjälä if (position >= vbl_start) 10893aa18df8SVille Syrjälä position -= vbl_end; 10903aa18df8SVille Syrjälä else 10913aa18df8SVille Syrjälä position += vtotal - vbl_end; 10923aa18df8SVille Syrjälä 1093*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 10943aa18df8SVille Syrjälä *vpos = position; 10953aa18df8SVille Syrjälä *hpos = 0; 10963aa18df8SVille Syrjälä } else { 10970af7e4dfSMario Kleiner *vpos = position / htotal; 10980af7e4dfSMario Kleiner *hpos = position - (*vpos * htotal); 10990af7e4dfSMario Kleiner } 11000af7e4dfSMario Kleiner 11011bf6ad62SDaniel Vetter return true; 11020af7e4dfSMario Kleiner } 11030af7e4dfSMario Kleiner 1104a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc) 1105a225f079SVille Syrjälä { 1106fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1107a225f079SVille Syrjälä unsigned long irqflags; 1108a225f079SVille Syrjälä int position; 1109a225f079SVille Syrjälä 1110a225f079SVille Syrjälä spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1111a225f079SVille Syrjälä position = __intel_get_crtc_scanline(crtc); 1112a225f079SVille Syrjälä spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1113a225f079SVille Syrjälä 1114a225f079SVille Syrjälä return position; 1115a225f079SVille Syrjälä } 1116a225f079SVille Syrjälä 111791d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1118f97108d1SJesse Barnes { 1119b5b72e89SMatthew Garrett u32 busy_up, busy_down, max_avg, min_avg; 11209270388eSDaniel Vetter u8 new_delay; 11219270388eSDaniel Vetter 1122d0ecd7e2SDaniel Vetter spin_lock(&mchdev_lock); 1123f97108d1SJesse Barnes 112473edd18fSDaniel Vetter I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 112573edd18fSDaniel Vetter 112620e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay; 11279270388eSDaniel Vetter 11287648fa99SJesse Barnes I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1129b5b72e89SMatthew Garrett busy_up = I915_READ(RCPREVBSYTUPAVG); 1130b5b72e89SMatthew Garrett busy_down = I915_READ(RCPREVBSYTDNAVG); 1131f97108d1SJesse Barnes max_avg = I915_READ(RCBMAXAVG); 1132f97108d1SJesse Barnes min_avg = I915_READ(RCBMINAVG); 1133f97108d1SJesse Barnes 1134f97108d1SJesse Barnes /* Handle RCS change request from hw */ 1135b5b72e89SMatthew Garrett if (busy_up > max_avg) { 113620e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 113720e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay - 1; 113820e4d407SDaniel Vetter if (new_delay < dev_priv->ips.max_delay) 113920e4d407SDaniel Vetter new_delay = dev_priv->ips.max_delay; 1140b5b72e89SMatthew Garrett } else if (busy_down < min_avg) { 114120e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 114220e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay + 1; 114320e4d407SDaniel Vetter if (new_delay > dev_priv->ips.min_delay) 114420e4d407SDaniel Vetter new_delay = dev_priv->ips.min_delay; 1145f97108d1SJesse Barnes } 1146f97108d1SJesse Barnes 114791d14251STvrtko Ursulin if (ironlake_set_drps(dev_priv, new_delay)) 114820e4d407SDaniel Vetter dev_priv->ips.cur_delay = new_delay; 1149f97108d1SJesse Barnes 1150d0ecd7e2SDaniel Vetter spin_unlock(&mchdev_lock); 11519270388eSDaniel Vetter 1152f97108d1SJesse Barnes return; 1153f97108d1SJesse Barnes } 1154f97108d1SJesse Barnes 11550bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine) 1156549f7365SChris 
Wilson { 11573f88325cSChris Wilson const u32 seqno = intel_engine_get_seqno(engine); 1158e61e0f51SChris Wilson struct i915_request *rq = NULL; 11593f88325cSChris Wilson struct task_struct *tsk = NULL; 116056299fb7SChris Wilson struct intel_wait *wait; 1161dffabc8fSTvrtko Ursulin 11623f88325cSChris Wilson if (unlikely(!engine->breadcrumbs.irq_armed)) 1163bcbd5c33SChris Wilson return; 1164bcbd5c33SChris Wilson 11653f88325cSChris Wilson rcu_read_lock(); 116656299fb7SChris Wilson 116761d3dc70SChris Wilson spin_lock(&engine->breadcrumbs.irq_lock); 116861d3dc70SChris Wilson wait = engine->breadcrumbs.irq_wait; 116956299fb7SChris Wilson if (wait) { 11703f88325cSChris Wilson /* 11713f88325cSChris Wilson * We use a callback from the dma-fence to submit 117256299fb7SChris Wilson * requests after waiting on our own requests. To 117356299fb7SChris Wilson * ensure minimum delay in queuing the next request to 117456299fb7SChris Wilson * hardware, signal the fence now rather than wait for 117556299fb7SChris Wilson * the signaler to be woken up. We still wake up the 117656299fb7SChris Wilson * waiter in order to handle the irq-seqno coherency 117756299fb7SChris Wilson * issues (we may receive the interrupt before the 117856299fb7SChris Wilson * seqno is written, see __i915_request_irq_complete()) 117956299fb7SChris Wilson * and to handle coalescing of multiple seqno updates 118056299fb7SChris Wilson * and many waiters. 118156299fb7SChris Wilson */ 11823f88325cSChris Wilson if (i915_seqno_passed(seqno, wait->seqno)) { 1183e61e0f51SChris Wilson struct i915_request *waiter = wait->request; 1184de4d2106SChris Wilson 1185e3be4079SChris Wilson if (waiter && 1186e3be4079SChris Wilson !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1187de4d2106SChris Wilson &waiter->fence.flags) && 1188de4d2106SChris Wilson intel_wait_check_request(wait, waiter)) 1189e61e0f51SChris Wilson rq = i915_request_get(waiter); 119056299fb7SChris Wilson 11913f88325cSChris Wilson tsk = wait->tsk; 11923f88325cSChris Wilson } else { 119369dc4d00SChris Wilson if (engine->irq_seqno_barrier && 119469dc4d00SChris Wilson i915_seqno_passed(seqno, wait->seqno - 1)) { 11953f88325cSChris Wilson set_bit(ENGINE_IRQ_BREADCRUMB, 11963f88325cSChris Wilson &engine->irq_posted); 11973f88325cSChris Wilson tsk = wait->tsk; 11983f88325cSChris Wilson } 11993f88325cSChris Wilson } 120078796877SChris Wilson 120178796877SChris Wilson engine->breadcrumbs.irq_count++; 120267b807a8SChris Wilson } else { 1203bcbd5c33SChris Wilson if (engine->breadcrumbs.irq_armed) 120467b807a8SChris Wilson __intel_engine_disarm_breadcrumbs(engine); 120556299fb7SChris Wilson } 120661d3dc70SChris Wilson spin_unlock(&engine->breadcrumbs.irq_lock); 120756299fb7SChris Wilson 120824754d75SChris Wilson if (rq) { 1209e3be4079SChris Wilson spin_lock(&rq->lock); 1210e3be4079SChris Wilson dma_fence_signal_locked(&rq->fence); 12114e9a8befSChris Wilson GEM_BUG_ON(!i915_request_completed(rq)); 1212e3be4079SChris Wilson spin_unlock(&rq->lock); 1213e3be4079SChris Wilson 1214e61e0f51SChris Wilson i915_request_put(rq); 121524754d75SChris Wilson } 121656299fb7SChris Wilson 12173f88325cSChris Wilson if (tsk && tsk->state & TASK_NORMAL) 12183f88325cSChris Wilson wake_up_process(tsk); 12193f88325cSChris Wilson 12203f88325cSChris Wilson rcu_read_unlock(); 12213f88325cSChris Wilson 122256299fb7SChris Wilson trace_intel_engine_notify(engine, wait); 1223549f7365SChris Wilson } 1224549f7365SChris Wilson 122543cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv, 122643cf3bf0SChris Wilson struct 
intel_rps_ei *ei) 122731685c25SDeepak S { 1228679cb6c1SMika Kuoppala ei->ktime = ktime_get_raw(); 122943cf3bf0SChris Wilson ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 123043cf3bf0SChris Wilson ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 123131685c25SDeepak S } 123231685c25SDeepak S 123343cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 123443cf3bf0SChris Wilson { 1235562d9baeSSagar Arun Kamble memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 123643cf3bf0SChris Wilson } 123743cf3bf0SChris Wilson 123843cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 123943cf3bf0SChris Wilson { 1240562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1241562d9baeSSagar Arun Kamble const struct intel_rps_ei *prev = &rps->ei; 124243cf3bf0SChris Wilson struct intel_rps_ei now; 124343cf3bf0SChris Wilson u32 events = 0; 124443cf3bf0SChris Wilson 1245e0e8c7cbSChris Wilson if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 124643cf3bf0SChris Wilson return 0; 124743cf3bf0SChris Wilson 124843cf3bf0SChris Wilson vlv_c0_read(dev_priv, &now); 124931685c25SDeepak S 1250679cb6c1SMika Kuoppala if (prev->ktime) { 1251e0e8c7cbSChris Wilson u64 time, c0; 1252569884e3SChris Wilson u32 render, media; 1253e0e8c7cbSChris Wilson 1254679cb6c1SMika Kuoppala time = ktime_us_delta(now.ktime, prev->ktime); 12558f68d591SChris Wilson 1256e0e8c7cbSChris Wilson time *= dev_priv->czclk_freq; 1257e0e8c7cbSChris Wilson 1258e0e8c7cbSChris Wilson /* Workload can be split between render + media, 1259e0e8c7cbSChris Wilson * e.g. SwapBuffers being blitted in X after being rendered in 1260e0e8c7cbSChris Wilson * mesa. To account for this we need to combine both engines 1261e0e8c7cbSChris Wilson * into our activity counter. 
1262e0e8c7cbSChris Wilson */ 1263569884e3SChris Wilson render = now.render_c0 - prev->render_c0; 1264569884e3SChris Wilson media = now.media_c0 - prev->media_c0; 1265569884e3SChris Wilson c0 = max(render, media); 12666b7f6aa7SMika Kuoppala c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1267e0e8c7cbSChris Wilson 126860548c55SChris Wilson if (c0 > time * rps->power.up_threshold) 1269e0e8c7cbSChris Wilson events = GEN6_PM_RP_UP_THRESHOLD; 127060548c55SChris Wilson else if (c0 < time * rps->power.down_threshold) 1271e0e8c7cbSChris Wilson events = GEN6_PM_RP_DOWN_THRESHOLD; 127231685c25SDeepak S } 127331685c25SDeepak S 1274562d9baeSSagar Arun Kamble rps->ei = now; 127543cf3bf0SChris Wilson return events; 127631685c25SDeepak S } 127731685c25SDeepak S 12784912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work) 12793b8d8d91SJesse Barnes { 12802d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1281562d9baeSSagar Arun Kamble container_of(work, struct drm_i915_private, gt_pm.rps.work); 1282562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 12837c0a16adSChris Wilson bool client_boost = false; 12848d3afd7dSChris Wilson int new_delay, adj, min, max; 12857c0a16adSChris Wilson u32 pm_iir = 0; 12863b8d8d91SJesse Barnes 128759cdb63dSDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 1288562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1289562d9baeSSagar Arun Kamble pm_iir = fetch_and_zero(&rps->pm_iir); 1290562d9baeSSagar Arun Kamble client_boost = atomic_read(&rps->num_waiters); 1291d4d70aa5SImre Deak } 129259cdb63dSDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 12934912d041SBen Widawsky 129460611c13SPaulo Zanoni /* Make sure we didn't queue anything we're not going to process. */ 1295a6706b45SDeepak S WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 12968d3afd7dSChris Wilson if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 12977c0a16adSChris Wilson goto out; 12983b8d8d91SJesse Barnes 12999f817501SSagar Arun Kamble mutex_lock(&dev_priv->pcu_lock); 13007b9e0ae6SChris Wilson 130143cf3bf0SChris Wilson pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 130243cf3bf0SChris Wilson 1303562d9baeSSagar Arun Kamble adj = rps->last_adj; 1304562d9baeSSagar Arun Kamble new_delay = rps->cur_freq; 1305562d9baeSSagar Arun Kamble min = rps->min_freq_softlimit; 1306562d9baeSSagar Arun Kamble max = rps->max_freq_softlimit; 13077b92c1bdSChris Wilson if (client_boost) 1308562d9baeSSagar Arun Kamble max = rps->max_freq; 1309562d9baeSSagar Arun Kamble if (client_boost && new_delay < rps->boost_freq) { 1310562d9baeSSagar Arun Kamble new_delay = rps->boost_freq; 13118d3afd7dSChris Wilson adj = 0; 13128d3afd7dSChris Wilson } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1313dd75fdc8SChris Wilson if (adj > 0) 1314dd75fdc8SChris Wilson adj *= 2; 1315edcf284bSChris Wilson else /* CHV needs even encode values */ 1316edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? 
2 : 1; 13177e79a683SSagar Arun Kamble 1318562d9baeSSagar Arun Kamble if (new_delay >= rps->max_freq_softlimit) 13197e79a683SSagar Arun Kamble adj = 0; 13207b92c1bdSChris Wilson } else if (client_boost) { 1321f5a4c67dSChris Wilson adj = 0; 1322dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1323562d9baeSSagar Arun Kamble if (rps->cur_freq > rps->efficient_freq) 1324562d9baeSSagar Arun Kamble new_delay = rps->efficient_freq; 1325562d9baeSSagar Arun Kamble else if (rps->cur_freq > rps->min_freq_softlimit) 1326562d9baeSSagar Arun Kamble new_delay = rps->min_freq_softlimit; 1327dd75fdc8SChris Wilson adj = 0; 1328dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1329dd75fdc8SChris Wilson if (adj < 0) 1330dd75fdc8SChris Wilson adj *= 2; 1331edcf284bSChris Wilson else /* CHV needs even encode values */ 1332edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 13337e79a683SSagar Arun Kamble 1334562d9baeSSagar Arun Kamble if (new_delay <= rps->min_freq_softlimit) 13357e79a683SSagar Arun Kamble adj = 0; 1336dd75fdc8SChris Wilson } else { /* unknown event */ 1337edcf284bSChris Wilson adj = 0; 1338dd75fdc8SChris Wilson } 13393b8d8d91SJesse Barnes 1340562d9baeSSagar Arun Kamble rps->last_adj = adj; 1341edcf284bSChris Wilson 134279249636SBen Widawsky /* sysfs frequency interfaces may have snuck in while servicing the 134379249636SBen Widawsky * interrupt 134479249636SBen Widawsky */ 1345edcf284bSChris Wilson new_delay += adj; 13468d3afd7dSChris Wilson new_delay = clamp_t(int, new_delay, min, max); 134727544369SDeepak S 13489fcee2f7SChris Wilson if (intel_set_rps(dev_priv, new_delay)) { 13499fcee2f7SChris Wilson DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1350562d9baeSSagar Arun Kamble rps->last_adj = 0; 13519fcee2f7SChris Wilson } 13523b8d8d91SJesse Barnes 13539f817501SSagar Arun Kamble mutex_unlock(&dev_priv->pcu_lock); 13547c0a16adSChris Wilson 13557c0a16adSChris Wilson out: 13567c0a16adSChris Wilson /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 13577c0a16adSChris Wilson spin_lock_irq(&dev_priv->irq_lock); 1358562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) 13597c0a16adSChris Wilson gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 13607c0a16adSChris Wilson spin_unlock_irq(&dev_priv->irq_lock); 13613b8d8d91SJesse Barnes } 13623b8d8d91SJesse Barnes 1363e3689190SBen Widawsky 1364e3689190SBen Widawsky /** 1365e3689190SBen Widawsky * ivybridge_parity_work - Workqueue called when a parity error interrupt 1366e3689190SBen Widawsky * occurred. 1367e3689190SBen Widawsky * @work: workqueue struct 1368e3689190SBen Widawsky * 1369e3689190SBen Widawsky * Doesn't actually do anything except notify userspace. As a consequence of 1370e3689190SBen Widawsky * this event, userspace should try to remap the bad rows since statistically 1371e3689190SBen Widawsky * the same row is likely to go bad again. 
1372e3689190SBen Widawsky */ 1373e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work) 1374e3689190SBen Widawsky { 13752d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1376cefcff8fSJoonas Lahtinen container_of(work, typeof(*dev_priv), l3_parity.error_work); 1377e3689190SBen Widawsky u32 error_status, row, bank, subbank; 137835a85ac6SBen Widawsky char *parity_event[6]; 1379e3689190SBen Widawsky uint32_t misccpctl; 138035a85ac6SBen Widawsky uint8_t slice = 0; 1381e3689190SBen Widawsky 1382e3689190SBen Widawsky /* We must turn off DOP level clock gating to access the L3 registers. 1383e3689190SBen Widawsky * In order to prevent a get/put style interface, acquire struct mutex 1384e3689190SBen Widawsky * any time we access those registers. 1385e3689190SBen Widawsky */ 138691c8a326SChris Wilson mutex_lock(&dev_priv->drm.struct_mutex); 1387e3689190SBen Widawsky 138835a85ac6SBen Widawsky /* If we've screwed up tracking, just let the interrupt fire again */ 138935a85ac6SBen Widawsky if (WARN_ON(!dev_priv->l3_parity.which_slice)) 139035a85ac6SBen Widawsky goto out; 139135a85ac6SBen Widawsky 1392e3689190SBen Widawsky misccpctl = I915_READ(GEN7_MISCCPCTL); 1393e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1394e3689190SBen Widawsky POSTING_READ(GEN7_MISCCPCTL); 1395e3689190SBen Widawsky 139635a85ac6SBen Widawsky while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1397f0f59a00SVille Syrjälä i915_reg_t reg; 139835a85ac6SBen Widawsky 139935a85ac6SBen Widawsky slice--; 14002d1fe073SJoonas Lahtinen if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 140135a85ac6SBen Widawsky break; 140235a85ac6SBen Widawsky 140335a85ac6SBen Widawsky dev_priv->l3_parity.which_slice &= ~(1<<slice); 140435a85ac6SBen Widawsky 14056fa1c5f1SVille Syrjälä reg = GEN7_L3CDERRST1(slice); 140635a85ac6SBen Widawsky 140735a85ac6SBen Widawsky error_status = I915_READ(reg); 1408e3689190SBen Widawsky row = GEN7_PARITY_ERROR_ROW(error_status); 1409e3689190SBen Widawsky bank = GEN7_PARITY_ERROR_BANK(error_status); 1410e3689190SBen Widawsky subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1411e3689190SBen Widawsky 141235a85ac6SBen Widawsky I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 141335a85ac6SBen Widawsky POSTING_READ(reg); 1414e3689190SBen Widawsky 1415cce723edSBen Widawsky parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1416e3689190SBen Widawsky parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1417e3689190SBen Widawsky parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1418e3689190SBen Widawsky parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 141935a85ac6SBen Widawsky parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 142035a85ac6SBen Widawsky parity_event[5] = NULL; 1421e3689190SBen Widawsky 142291c8a326SChris Wilson kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1423e3689190SBen Widawsky KOBJ_CHANGE, parity_event); 1424e3689190SBen Widawsky 142535a85ac6SBen Widawsky DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 142635a85ac6SBen Widawsky slice, row, bank, subbank); 1427e3689190SBen Widawsky 142835a85ac6SBen Widawsky kfree(parity_event[4]); 1429e3689190SBen Widawsky kfree(parity_event[3]); 1430e3689190SBen Widawsky kfree(parity_event[2]); 1431e3689190SBen Widawsky kfree(parity_event[1]); 1432e3689190SBen Widawsky } 1433e3689190SBen Widawsky 143435a85ac6SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl); 143535a85ac6SBen Widawsky 143635a85ac6SBen 
Widawsky out: 143735a85ac6SBen Widawsky WARN_ON(dev_priv->l3_parity.which_slice); 14384cb21832SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 14392d1fe073SJoonas Lahtinen gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 14404cb21832SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 144135a85ac6SBen Widawsky 144291c8a326SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 144335a85ac6SBen Widawsky } 144435a85ac6SBen Widawsky 1445261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1446261e40b8SVille Syrjälä u32 iir) 1447e3689190SBen Widawsky { 1448261e40b8SVille Syrjälä if (!HAS_L3_DPF(dev_priv)) 1449e3689190SBen Widawsky return; 1450e3689190SBen Widawsky 1451d0ecd7e2SDaniel Vetter spin_lock(&dev_priv->irq_lock); 1452261e40b8SVille Syrjälä gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1453d0ecd7e2SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 1454e3689190SBen Widawsky 1455261e40b8SVille Syrjälä iir &= GT_PARITY_ERROR(dev_priv); 145635a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 145735a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 1; 145835a85ac6SBen Widawsky 145935a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 146035a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 0; 146135a85ac6SBen Widawsky 1462a4da4fa4SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1463e3689190SBen Widawsky } 1464e3689190SBen Widawsky 1465261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1466f1af8fc1SPaulo Zanoni u32 gt_iir) 1467f1af8fc1SPaulo Zanoni { 1468f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14693b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1470f1af8fc1SPaulo Zanoni if (gt_iir & ILK_BSD_USER_INTERRUPT) 14713b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1472f1af8fc1SPaulo Zanoni } 1473f1af8fc1SPaulo Zanoni 1474261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1475e7b4c6b1SDaniel Vetter u32 gt_iir) 1476e7b4c6b1SDaniel Vetter { 1477f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14783b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1479cc609d5dSBen Widawsky if (gt_iir & GT_BSD_USER_INTERRUPT) 14803b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1481cc609d5dSBen Widawsky if (gt_iir & GT_BLT_USER_INTERRUPT) 14823b3f1650SAkash Goel notify_ring(dev_priv->engine[BCS]); 1483e7b4c6b1SDaniel Vetter 1484cc609d5dSBen Widawsky if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1485cc609d5dSBen Widawsky GT_BSD_CS_ERROR_INTERRUPT | 1486aaecdf61SDaniel Vetter GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1487aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1488e3689190SBen Widawsky 1489261e40b8SVille Syrjälä if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1490261e40b8SVille Syrjälä ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1491e7b4c6b1SDaniel Vetter } 1492e7b4c6b1SDaniel Vetter 14935d3d69d5SChris Wilson static void 149451f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1495fbcc1a0cSNick Hoath { 149631de7350SChris Wilson bool tasklet = false; 1497f747026cSChris Wilson 1498fd8526e5SChris Wilson if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 14998ea397faSChris Wilson tasklet = true; 150031de7350SChris Wilson 150151f6b0f9SChris Wilson if (iir & GT_RENDER_USER_INTERRUPT) { 150231de7350SChris Wilson notify_ring(engine); 150393ffbe8eSMichal Wajdeczko tasklet |= 
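/* iir here carries only this engine's bits, already shifted down by the caller: a context-switch event only needs the execlists tasklet, while a user interrupt also wakes waiters via notify_ring() and, under GuC submission, schedules the tasklet as well. */ 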
USES_GUC_SUBMISSION(engine->i915); 150431de7350SChris Wilson } 150531de7350SChris Wilson 150631de7350SChris Wilson if (tasklet) 1507fd8526e5SChris Wilson tasklet_hi_schedule(&engine->execlists.tasklet); 1508fbcc1a0cSNick Hoath } 1509fbcc1a0cSNick Hoath 15102e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915, 151155ef72f2SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1512abd58f01SBen Widawsky { 15132e4a5b25SChris Wilson void __iomem * const regs = i915->regs; 15142e4a5b25SChris Wilson 1515f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1516f0fd96f5SChris Wilson GEN8_GT_BCS_IRQ | \ 1517f0fd96f5SChris Wilson GEN8_GT_VCS1_IRQ | \ 1518f0fd96f5SChris Wilson GEN8_GT_VCS2_IRQ | \ 1519f0fd96f5SChris Wilson GEN8_GT_VECS_IRQ | \ 1520f0fd96f5SChris Wilson GEN8_GT_PM_IRQ | \ 1521f0fd96f5SChris Wilson GEN8_GT_GUC_IRQ) 1522f0fd96f5SChris Wilson 1523abd58f01SBen Widawsky if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15242e4a5b25SChris Wilson gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 15252e4a5b25SChris Wilson if (likely(gt_iir[0])) 15262e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1527abd58f01SBen Widawsky } 1528abd58f01SBen Widawsky 152985f9b5f9SZhao Yakui if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15302e4a5b25SChris Wilson gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 15312e4a5b25SChris Wilson if (likely(gt_iir[1])) 15322e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 153374cdb337SChris Wilson } 153474cdb337SChris Wilson 153526705e20SSagar Arun Kamble if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15362e4a5b25SChris Wilson gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1537f4de7794SChris Wilson if (likely(gt_iir[2])) 1538f4de7794SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]); 15390961021aSBen Widawsky } 15402e4a5b25SChris Wilson 15412e4a5b25SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15422e4a5b25SChris Wilson gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 15432e4a5b25SChris Wilson if (likely(gt_iir[3])) 15442e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 154555ef72f2SChris Wilson } 1546abd58f01SBen Widawsky } 1547abd58f01SBen Widawsky 15482e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1549f0fd96f5SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1550e30e251aSVille Syrjälä { 1551f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15522e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[RCS], 155351f6b0f9SChris Wilson gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 15542e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[BCS], 155551f6b0f9SChris Wilson gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1556e30e251aSVille Syrjälä } 1557e30e251aSVille Syrjälä 1558f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15592e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS], 156051f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 15612e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS2], 156251f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1563e30e251aSVille Syrjälä } 1564e30e251aSVille Syrjälä 1565f0fd96f5SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15662e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VECS], 156751f6b0f9SChris Wilson gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 1568f0fd96f5SChris Wilson } 1569e30e251aSVille Syrjälä 1570f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15712e4a5b25SChris Wilson 
gen6_rps_irq_handler(i915, gt_iir[2]); 15722e4a5b25SChris Wilson gen9_guc_irq_handler(i915, gt_iir[2]); 1573e30e251aSVille Syrjälä } 1574f0fd96f5SChris Wilson } 1575e30e251aSVille Syrjälä 1576af92058fSVille Syrjälä static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1577121e758eSDhinakaran Pandiyan { 1578af92058fSVille Syrjälä switch (pin) { 1579af92058fSVille Syrjälä case HPD_PORT_C: 1580121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1581af92058fSVille Syrjälä case HPD_PORT_D: 1582121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1583af92058fSVille Syrjälä case HPD_PORT_E: 1584121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1585af92058fSVille Syrjälä case HPD_PORT_F: 1586121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1587121e758eSDhinakaran Pandiyan default: 1588121e758eSDhinakaran Pandiyan return false; 1589121e758eSDhinakaran Pandiyan } 1590121e758eSDhinakaran Pandiyan } 1591121e758eSDhinakaran Pandiyan 1592af92058fSVille Syrjälä static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 159363c88d22SImre Deak { 1594af92058fSVille Syrjälä switch (pin) { 1595af92058fSVille Syrjälä case HPD_PORT_A: 1596195baa06SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 1597af92058fSVille Syrjälä case HPD_PORT_B: 159863c88d22SImre Deak return val & PORTB_HOTPLUG_LONG_DETECT; 1599af92058fSVille Syrjälä case HPD_PORT_C: 160063c88d22SImre Deak return val & PORTC_HOTPLUG_LONG_DETECT; 160163c88d22SImre Deak default: 160263c88d22SImre Deak return false; 160363c88d22SImre Deak } 160463c88d22SImre Deak } 160563c88d22SImre Deak 1606af92058fSVille Syrjälä static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 160731604222SAnusha Srivatsa { 1608af92058fSVille Syrjälä switch (pin) { 1609af92058fSVille Syrjälä case HPD_PORT_A: 161031604222SAnusha Srivatsa return val & ICP_DDIA_HPD_LONG_DETECT; 1611af92058fSVille Syrjälä case HPD_PORT_B: 161231604222SAnusha Srivatsa return val & ICP_DDIB_HPD_LONG_DETECT; 161331604222SAnusha Srivatsa default: 161431604222SAnusha Srivatsa return false; 161531604222SAnusha Srivatsa } 161631604222SAnusha Srivatsa } 161731604222SAnusha Srivatsa 1618af92058fSVille Syrjälä static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 161931604222SAnusha Srivatsa { 1620af92058fSVille Syrjälä switch (pin) { 1621af92058fSVille Syrjälä case HPD_PORT_C: 162231604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 1623af92058fSVille Syrjälä case HPD_PORT_D: 162431604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 1625af92058fSVille Syrjälä case HPD_PORT_E: 162631604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 1627af92058fSVille Syrjälä case HPD_PORT_F: 162831604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 162931604222SAnusha Srivatsa default: 163031604222SAnusha Srivatsa return false; 163131604222SAnusha Srivatsa } 163231604222SAnusha Srivatsa } 163331604222SAnusha Srivatsa 1634af92058fSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val) 16356dbf30ceSVille Syrjälä { 1636af92058fSVille Syrjälä switch (pin) { 1637af92058fSVille Syrjälä case HPD_PORT_E: 16386dbf30ceSVille Syrjälä return val & PORTE_HOTPLUG_LONG_DETECT; 16396dbf30ceSVille Syrjälä default: 16406dbf30ceSVille Syrjälä return false; 16416dbf30ceSVille Syrjälä } 16426dbf30ceSVille Syrjälä } 16436dbf30ceSVille Syrjälä 
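/* The *_long_detect() helpers below report whether the hotplug pulse seen on a given pin was a long pulse, judged from the platform's digital hotplug register value; they are passed to intel_get_hpd_pins() as its long_pulse_detect() callback. */ 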
1644af92058fSVille Syrjälä static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 164574c0b395SVille Syrjälä { 1646af92058fSVille Syrjälä switch (pin) { 1647af92058fSVille Syrjälä case HPD_PORT_A: 164874c0b395SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 1649af92058fSVille Syrjälä case HPD_PORT_B: 165074c0b395SVille Syrjälä return val & PORTB_HOTPLUG_LONG_DETECT; 1651af92058fSVille Syrjälä case HPD_PORT_C: 165274c0b395SVille Syrjälä return val & PORTC_HOTPLUG_LONG_DETECT; 1653af92058fSVille Syrjälä case HPD_PORT_D: 165474c0b395SVille Syrjälä return val & PORTD_HOTPLUG_LONG_DETECT; 165574c0b395SVille Syrjälä default: 165674c0b395SVille Syrjälä return false; 165774c0b395SVille Syrjälä } 165874c0b395SVille Syrjälä } 165974c0b395SVille Syrjälä 1660af92058fSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 1661e4ce95aaSVille Syrjälä { 1662af92058fSVille Syrjälä switch (pin) { 1663af92058fSVille Syrjälä case HPD_PORT_A: 1664e4ce95aaSVille Syrjälä return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1665e4ce95aaSVille Syrjälä default: 1666e4ce95aaSVille Syrjälä return false; 1667e4ce95aaSVille Syrjälä } 1668e4ce95aaSVille Syrjälä } 1669e4ce95aaSVille Syrjälä 1670af92058fSVille Syrjälä static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 167113cf5504SDave Airlie { 1672af92058fSVille Syrjälä switch (pin) { 1673af92058fSVille Syrjälä case HPD_PORT_B: 1674676574dfSJani Nikula return val & PORTB_HOTPLUG_LONG_DETECT; 1675af92058fSVille Syrjälä case HPD_PORT_C: 1676676574dfSJani Nikula return val & PORTC_HOTPLUG_LONG_DETECT; 1677af92058fSVille Syrjälä case HPD_PORT_D: 1678676574dfSJani Nikula return val & PORTD_HOTPLUG_LONG_DETECT; 1679676574dfSJani Nikula default: 1680676574dfSJani Nikula return false; 168113cf5504SDave Airlie } 168213cf5504SDave Airlie } 168313cf5504SDave Airlie 1684af92058fSVille Syrjälä static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) 168513cf5504SDave Airlie { 1686af92058fSVille Syrjälä switch (pin) { 1687af92058fSVille Syrjälä case HPD_PORT_B: 1688676574dfSJani Nikula return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1689af92058fSVille Syrjälä case HPD_PORT_C: 1690676574dfSJani Nikula return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1691af92058fSVille Syrjälä case HPD_PORT_D: 1692676574dfSJani Nikula return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1693676574dfSJani Nikula default: 1694676574dfSJani Nikula return false; 169513cf5504SDave Airlie } 169613cf5504SDave Airlie } 169713cf5504SDave Airlie 169842db67d6SVille Syrjälä /* 169942db67d6SVille Syrjälä * Get a bit mask of pins that have triggered, and which ones may be long. 170042db67d6SVille Syrjälä * This can be called multiple times with the same masks to accumulate 170142db67d6SVille Syrjälä * hotplug detection results from several registers. 170242db67d6SVille Syrjälä * 170342db67d6SVille Syrjälä * Note that the caller is expected to zero out the masks initially. 
170442db67d6SVille Syrjälä */ 1705cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1706cf53902fSRodrigo Vivi u32 *pin_mask, u32 *long_mask, 17078c841e57SJani Nikula u32 hotplug_trigger, u32 dig_hotplug_reg, 1708fd63e2a9SImre Deak const u32 hpd[HPD_NUM_PINS], 1709af92058fSVille Syrjälä bool long_pulse_detect(enum hpd_pin pin, u32 val)) 1710676574dfSJani Nikula { 1711e9be2850SVille Syrjälä enum hpd_pin pin; 1712676574dfSJani Nikula 1713e9be2850SVille Syrjälä for_each_hpd_pin(pin) { 1714e9be2850SVille Syrjälä if ((hpd[pin] & hotplug_trigger) == 0) 17158c841e57SJani Nikula continue; 17168c841e57SJani Nikula 1717e9be2850SVille Syrjälä *pin_mask |= BIT(pin); 1718676574dfSJani Nikula 1719af92058fSVille Syrjälä if (long_pulse_detect(pin, dig_hotplug_reg)) 1720e9be2850SVille Syrjälä *long_mask |= BIT(pin); 1721676574dfSJani Nikula } 1722676574dfSJani Nikula 1723f88f0478SVille Syrjälä DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", 1724f88f0478SVille Syrjälä hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); 1725676574dfSJani Nikula 1726676574dfSJani Nikula } 1727676574dfSJani Nikula 172891d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1729515ac2bbSDaniel Vetter { 173028c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1731515ac2bbSDaniel Vetter } 1732515ac2bbSDaniel Vetter 173391d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1734ce99c256SDaniel Vetter { 17359ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1736ce99c256SDaniel Vetter } 1737ce99c256SDaniel Vetter 17388bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS) 173991d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 174091d14251STvrtko Ursulin enum pipe pipe, 1741eba94eb9SDaniel Vetter uint32_t crc0, uint32_t crc1, 1742eba94eb9SDaniel Vetter uint32_t crc2, uint32_t crc3, 17438bc5e955SDaniel Vetter uint32_t crc4) 17448bf1e9f1SShuang He { 17458bf1e9f1SShuang He struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 17468c6b709dSTomeu Vizoso struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17478c6b709dSTomeu Vizoso uint32_t crcs[5]; 1748b2c88f5bSDamien Lespiau 1749d538bbdfSDamien Lespiau spin_lock(&pipe_crc->lock); 17508c6b709dSTomeu Vizoso /* 17518c6b709dSTomeu Vizoso * For some not yet identified reason, the first CRC is 17528c6b709dSTomeu Vizoso * bonkers. So let's just wait for the next vblank and read 17538c6b709dSTomeu Vizoso * out the buggy result. 17548c6b709dSTomeu Vizoso * 1755163e8aecSRodrigo Vivi * On GEN8+ sometimes the second CRC is bonkers as well, so 17568c6b709dSTomeu Vizoso * don't trust that one either. 
17578c6b709dSTomeu Vizoso */ 1758033b7a23SMaarten Lankhorst if (pipe_crc->skipped <= 0 || 1759163e8aecSRodrigo Vivi (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 17608c6b709dSTomeu Vizoso pipe_crc->skipped++; 17618c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17628c6b709dSTomeu Vizoso return; 17638c6b709dSTomeu Vizoso } 17648c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17656cc42152SMaarten Lankhorst 17668c6b709dSTomeu Vizoso crcs[0] = crc0; 17678c6b709dSTomeu Vizoso crcs[1] = crc1; 17688c6b709dSTomeu Vizoso crcs[2] = crc2; 17698c6b709dSTomeu Vizoso crcs[3] = crc3; 17708c6b709dSTomeu Vizoso crcs[4] = crc4; 1771246ee524STomeu Vizoso drm_crtc_add_crc_entry(&crtc->base, true, 1772ca814b25SDaniel Vetter drm_crtc_accurate_vblank_count(&crtc->base), 1773246ee524STomeu Vizoso crcs); 17748c6b709dSTomeu Vizoso } 1775277de95eSDaniel Vetter #else 1776277de95eSDaniel Vetter static inline void 177791d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 177891d14251STvrtko Ursulin enum pipe pipe, 1779277de95eSDaniel Vetter uint32_t crc0, uint32_t crc1, 1780277de95eSDaniel Vetter uint32_t crc2, uint32_t crc3, 1781277de95eSDaniel Vetter uint32_t crc4) {} 1782277de95eSDaniel Vetter #endif 1783eba94eb9SDaniel Vetter 1784277de95eSDaniel Vetter 178591d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 178691d14251STvrtko Ursulin enum pipe pipe) 17875a69b89fSDaniel Vetter { 178891d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 17895a69b89fSDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 17905a69b89fSDaniel Vetter 0, 0, 0, 0); 17915a69b89fSDaniel Vetter } 17925a69b89fSDaniel Vetter 179391d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 179491d14251STvrtko Ursulin enum pipe pipe) 1795eba94eb9SDaniel Vetter { 179691d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 1797eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1798eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1799eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1800eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 18018bc5e955SDaniel Vetter I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1802eba94eb9SDaniel Vetter } 18035b3a856bSDaniel Vetter 180491d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 180591d14251STvrtko Ursulin enum pipe pipe) 18065b3a856bSDaniel Vetter { 18070b5c5ed0SDaniel Vetter uint32_t res1, res2; 18080b5c5ed0SDaniel Vetter 180991d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 3) 18100b5c5ed0SDaniel Vetter res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 18110b5c5ed0SDaniel Vetter else 18120b5c5ed0SDaniel Vetter res1 = 0; 18130b5c5ed0SDaniel Vetter 181491d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 18150b5c5ed0SDaniel Vetter res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 18160b5c5ed0SDaniel Vetter else 18170b5c5ed0SDaniel Vetter res2 = 0; 18185b3a856bSDaniel Vetter 181991d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 18200b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_RED(pipe)), 18210b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_GREEN(pipe)), 18220b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_BLUE(pipe)), 18230b5c5ed0SDaniel Vetter res1, res2); 18245b3a856bSDaniel Vetter } 18258bf1e9f1SShuang He 18261403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 18271403c0d4SPaulo Zanoni * IMR bits until 
the work is done. Other interrupts can be processed without 18281403c0d4SPaulo Zanoni * the work queue. */ 18291403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1830baf02a1fSBen Widawsky { 1831562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1832562d9baeSSagar Arun Kamble 1833a6706b45SDeepak S if (pm_iir & dev_priv->pm_rps_events) { 183459cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 1835f4e9af4fSAkash Goel gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1836562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1837562d9baeSSagar Arun Kamble rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1838562d9baeSSagar Arun Kamble schedule_work(&rps->work); 183941a05a3aSDaniel Vetter } 1840d4d70aa5SImre Deak spin_unlock(&dev_priv->irq_lock); 1841d4d70aa5SImre Deak } 1842baf02a1fSBen Widawsky 1843bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8) 1844c9a9a268SImre Deak return; 1845c9a9a268SImre Deak 18462d1fe073SJoonas Lahtinen if (HAS_VEBOX(dev_priv)) { 184712638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 18483b3f1650SAkash Goel notify_ring(dev_priv->engine[VECS]); 184912638c57SBen Widawsky 1850aaecdf61SDaniel Vetter if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1851aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 185212638c57SBen Widawsky } 18531403c0d4SPaulo Zanoni } 1854baf02a1fSBen Widawsky 185526705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 185626705e20SSagar Arun Kamble { 185793bf8096SMichal Wajdeczko if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 185893bf8096SMichal Wajdeczko intel_guc_to_host_event_handler(&dev_priv->guc); 185926705e20SSagar Arun Kamble } 186026705e20SSagar Arun Kamble 186144d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 186244d9241eSVille Syrjälä { 186344d9241eSVille Syrjälä enum pipe pipe; 186444d9241eSVille Syrjälä 186544d9241eSVille Syrjälä for_each_pipe(dev_priv, pipe) { 186644d9241eSVille Syrjälä I915_WRITE(PIPESTAT(pipe), 186744d9241eSVille Syrjälä PIPESTAT_INT_STATUS_MASK | 186844d9241eSVille Syrjälä PIPE_FIFO_UNDERRUN_STATUS); 186944d9241eSVille Syrjälä 187044d9241eSVille Syrjälä dev_priv->pipestat_irq_mask[pipe] = 0; 187144d9241eSVille Syrjälä } 187244d9241eSVille Syrjälä } 187344d9241eSVille Syrjälä 1874eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 187591d14251STvrtko Ursulin u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 18767e231dbeSJesse Barnes { 18777e231dbeSJesse Barnes int pipe; 18787e231dbeSJesse Barnes 187958ead0d7SImre Deak spin_lock(&dev_priv->irq_lock); 18801ca993d2SVille Syrjälä 18811ca993d2SVille Syrjälä if (!dev_priv->display_irqs_enabled) { 18821ca993d2SVille Syrjälä spin_unlock(&dev_priv->irq_lock); 18831ca993d2SVille Syrjälä return; 18841ca993d2SVille Syrjälä } 18851ca993d2SVille Syrjälä 1886055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 1887f0f59a00SVille Syrjälä i915_reg_t reg; 18886b12ca56SVille Syrjälä u32 status_mask, enable_mask, iir_bit = 0; 188991d181ddSImre Deak 1890bbb5eebfSDaniel Vetter /* 1891bbb5eebfSDaniel Vetter * PIPESTAT bits get signalled even when the interrupt is 1892bbb5eebfSDaniel Vetter * disabled with the mask bits, and some of the status bits do 1893bbb5eebfSDaniel Vetter * not generate interrupts at all (like the underrun bit). 
Hence 1894bbb5eebfSDaniel Vetter * we need to be careful that we only handle what we want to 1895bbb5eebfSDaniel Vetter * handle. 1896bbb5eebfSDaniel Vetter */ 18970f239f4cSDaniel Vetter 18980f239f4cSDaniel Vetter /* fifo underruns are filtered in the underrun handler. */ 18996b12ca56SVille Syrjälä status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1900bbb5eebfSDaniel Vetter 1901bbb5eebfSDaniel Vetter switch (pipe) { 1902bbb5eebfSDaniel Vetter case PIPE_A: 1903bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1904bbb5eebfSDaniel Vetter break; 1905bbb5eebfSDaniel Vetter case PIPE_B: 1906bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1907bbb5eebfSDaniel Vetter break; 19083278f67fSVille Syrjälä case PIPE_C: 19093278f67fSVille Syrjälä iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 19103278f67fSVille Syrjälä break; 1911bbb5eebfSDaniel Vetter } 1912bbb5eebfSDaniel Vetter if (iir & iir_bit) 19136b12ca56SVille Syrjälä status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1914bbb5eebfSDaniel Vetter 19156b12ca56SVille Syrjälä if (!status_mask) 191691d181ddSImre Deak continue; 191791d181ddSImre Deak 191891d181ddSImre Deak reg = PIPESTAT(pipe); 19196b12ca56SVille Syrjälä pipe_stats[pipe] = I915_READ(reg) & status_mask; 19206b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 19217e231dbeSJesse Barnes 19227e231dbeSJesse Barnes /* 19237e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 1924132c27c9SVille Syrjälä * 1925132c27c9SVille Syrjälä * Toggle the enable bits to make sure we get an 1926132c27c9SVille Syrjälä * edge in the ISR pipe event bit if we don't clear 1927132c27c9SVille Syrjälä * all the enabled status bits. Otherwise the edge 1928132c27c9SVille Syrjälä * triggered IIR on i965/g4x wouldn't notice that 1929132c27c9SVille Syrjälä * an interrupt is still pending. 
19307e231dbeSJesse Barnes */ 1931132c27c9SVille Syrjälä if (pipe_stats[pipe]) { 1932132c27c9SVille Syrjälä I915_WRITE(reg, pipe_stats[pipe]); 1933132c27c9SVille Syrjälä I915_WRITE(reg, enable_mask); 1934132c27c9SVille Syrjälä } 19357e231dbeSJesse Barnes } 193658ead0d7SImre Deak spin_unlock(&dev_priv->irq_lock); 19372ecb8ca4SVille Syrjälä } 19382ecb8ca4SVille Syrjälä 1939eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1940eb64343cSVille Syrjälä u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1941eb64343cSVille Syrjälä { 1942eb64343cSVille Syrjälä enum pipe pipe; 1943eb64343cSVille Syrjälä 1944eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1945eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1946eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1947eb64343cSVille Syrjälä 1948eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1949eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1950eb64343cSVille Syrjälä 1951eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1952eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1953eb64343cSVille Syrjälä } 1954eb64343cSVille Syrjälä } 1955eb64343cSVille Syrjälä 1956eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1957eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1958eb64343cSVille Syrjälä { 1959eb64343cSVille Syrjälä bool blc_event = false; 1960eb64343cSVille Syrjälä enum pipe pipe; 1961eb64343cSVille Syrjälä 1962eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1963eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1964eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1965eb64343cSVille Syrjälä 1966eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1967eb64343cSVille Syrjälä blc_event = true; 1968eb64343cSVille Syrjälä 1969eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1970eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1971eb64343cSVille Syrjälä 1972eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1973eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1974eb64343cSVille Syrjälä } 1975eb64343cSVille Syrjälä 1976eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1977eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 1978eb64343cSVille Syrjälä } 1979eb64343cSVille Syrjälä 1980eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1981eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1982eb64343cSVille Syrjälä { 1983eb64343cSVille Syrjälä bool blc_event = false; 1984eb64343cSVille Syrjälä enum pipe pipe; 1985eb64343cSVille Syrjälä 1986eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1987eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1988eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1989eb64343cSVille Syrjälä 1990eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1991eb64343cSVille Syrjälä blc_event = true; 1992eb64343cSVille Syrjälä 1993eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1994eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1995eb64343cSVille Syrjälä 1996eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 
1997eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1998eb64343cSVille Syrjälä } 1999eb64343cSVille Syrjälä 2000eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2001eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 2002eb64343cSVille Syrjälä 2003eb64343cSVille Syrjälä if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2004eb64343cSVille Syrjälä gmbus_irq_handler(dev_priv); 2005eb64343cSVille Syrjälä } 2006eb64343cSVille Syrjälä 200791d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 20082ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES]) 20092ecb8ca4SVille Syrjälä { 20102ecb8ca4SVille Syrjälä enum pipe pipe; 20117e231dbeSJesse Barnes 2012055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2013fd3a4024SDaniel Vetter if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2014fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 20154356d586SDaniel Vetter 20164356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 201791d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 20182d9d2b0bSVille Syrjälä 20191f7247c0SDaniel Vetter if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 20201f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 202131acc7f5SJesse Barnes } 202231acc7f5SJesse Barnes 2023c1874ed7SImre Deak if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 202491d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2025c1874ed7SImre Deak } 2026c1874ed7SImre Deak 20271ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 202816c6c56bSVille Syrjälä { 20290ba7c51aSVille Syrjälä u32 hotplug_status = 0, hotplug_status_mask; 20300ba7c51aSVille Syrjälä int i; 203116c6c56bSVille Syrjälä 20320ba7c51aSVille Syrjälä if (IS_G4X(dev_priv) || 20330ba7c51aSVille Syrjälä IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 20340ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 20350ba7c51aSVille Syrjälä DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 20360ba7c51aSVille Syrjälä else 20370ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 20380ba7c51aSVille Syrjälä 20390ba7c51aSVille Syrjälä /* 20400ba7c51aSVille Syrjälä * We absolutely have to clear all the pending interrupt 20410ba7c51aSVille Syrjälä * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 20420ba7c51aSVille Syrjälä * interrupt bit won't have an edge, and the i965/g4x 20430ba7c51aSVille Syrjälä * edge triggered IIR will not notice that an interrupt 20440ba7c51aSVille Syrjälä * is still pending. 
We can't use PORT_HOTPLUG_EN to 20450ba7c51aSVille Syrjälä * guarantee the edge as the act of toggling the enable 20460ba7c51aSVille Syrjälä * bits can itself generate a new hotplug interrupt :( 20470ba7c51aSVille Syrjälä */ 20480ba7c51aSVille Syrjälä for (i = 0; i < 10; i++) { 20490ba7c51aSVille Syrjälä u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 20500ba7c51aSVille Syrjälä 20510ba7c51aSVille Syrjälä if (tmp == 0) 20520ba7c51aSVille Syrjälä return hotplug_status; 20530ba7c51aSVille Syrjälä 20540ba7c51aSVille Syrjälä hotplug_status |= tmp; 20553ff60f89SOscar Mateo I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 20560ba7c51aSVille Syrjälä } 20570ba7c51aSVille Syrjälä 20580ba7c51aSVille Syrjälä WARN_ONCE(1, 20590ba7c51aSVille Syrjälä "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 20600ba7c51aSVille Syrjälä I915_READ(PORT_HOTPLUG_STAT)); 20611ae3c34cSVille Syrjälä 20621ae3c34cSVille Syrjälä return hotplug_status; 20631ae3c34cSVille Syrjälä } 20641ae3c34cSVille Syrjälä 206591d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 20661ae3c34cSVille Syrjälä u32 hotplug_status) 20671ae3c34cSVille Syrjälä { 20681ae3c34cSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 20693ff60f89SOscar Mateo 207091d14251STvrtko Ursulin if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 207191d14251STvrtko Ursulin IS_CHERRYVIEW(dev_priv)) { 207216c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 207316c6c56bSVille Syrjälä 207458f2cf24SVille Syrjälä if (hotplug_trigger) { 2075cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2076cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2077cf53902fSRodrigo Vivi hpd_status_g4x, 2078fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 207958f2cf24SVille Syrjälä 208091d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 208158f2cf24SVille Syrjälä } 2082369712e8SJani Nikula 2083369712e8SJani Nikula if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 208491d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 208516c6c56bSVille Syrjälä } else { 208616c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 208716c6c56bSVille Syrjälä 208858f2cf24SVille Syrjälä if (hotplug_trigger) { 2089cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2090cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2091cf53902fSRodrigo Vivi hpd_status_i915, 2092fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 209391d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 209416c6c56bSVille Syrjälä } 20953ff60f89SOscar Mateo } 209658f2cf24SVille Syrjälä } 209716c6c56bSVille Syrjälä 2098c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2099c1874ed7SImre Deak { 210045a83f84SDaniel Vetter struct drm_device *dev = arg; 2101fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2102c1874ed7SImre Deak irqreturn_t ret = IRQ_NONE; 2103c1874ed7SImre Deak 21042dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21052dd2a883SImre Deak return IRQ_NONE; 21062dd2a883SImre Deak 21071f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21081f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21091f814dacSImre Deak 21101e1cace9SVille Syrjälä do { 21116e814800SVille Syrjälä u32 iir, gt_iir, pm_iir; 21122ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21131ae3c34cSVille Syrjälä u32 hotplug_status = 0; 
2114a5e485a9SVille Syrjälä u32 ier = 0; 21153ff60f89SOscar Mateo 2116c1874ed7SImre Deak gt_iir = I915_READ(GTIIR); 2117c1874ed7SImre Deak pm_iir = I915_READ(GEN6_PMIIR); 21183ff60f89SOscar Mateo iir = I915_READ(VLV_IIR); 2119c1874ed7SImre Deak 2120c1874ed7SImre Deak if (gt_iir == 0 && pm_iir == 0 && iir == 0) 21211e1cace9SVille Syrjälä break; 2122c1874ed7SImre Deak 2123c1874ed7SImre Deak ret = IRQ_HANDLED; 2124c1874ed7SImre Deak 2125a5e485a9SVille Syrjälä /* 2126a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2127a5e485a9SVille Syrjälä * 2128a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2129a5e485a9SVille Syrjälä * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2130a5e485a9SVille Syrjälä * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2131a5e485a9SVille Syrjälä * 2132a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2133a5e485a9SVille Syrjälä * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2134a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2135a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2136a5e485a9SVille Syrjälä * bits this time around. 2137a5e485a9SVille Syrjälä */ 21384a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 2139a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2140a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 21414a0a0202SVille Syrjälä 21424a0a0202SVille Syrjälä if (gt_iir) 21434a0a0202SVille Syrjälä I915_WRITE(GTIIR, gt_iir); 21444a0a0202SVille Syrjälä if (pm_iir) 21454a0a0202SVille Syrjälä I915_WRITE(GEN6_PMIIR, pm_iir); 21464a0a0202SVille Syrjälä 21477ce4d1f2SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 21481ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 21497ce4d1f2SVille Syrjälä 21503ff60f89SOscar Mateo /* Call regardless, as some status bits might not be 21513ff60f89SOscar Mateo * signalled in iir */ 2152eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 21537ce4d1f2SVille Syrjälä 2154eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2155eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT)) 2156eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2157eef57324SJerome Anand 21587ce4d1f2SVille Syrjälä /* 21597ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 21607ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
21617ce4d1f2SVille Syrjälä */ 21627ce4d1f2SVille Syrjälä if (iir) 21637ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 21644a0a0202SVille Syrjälä 2165a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 21664a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 21671ae3c34cSVille Syrjälä 216852894874SVille Syrjälä if (gt_iir) 2169261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 217052894874SVille Syrjälä if (pm_iir) 217152894874SVille Syrjälä gen6_rps_irq_handler(dev_priv, pm_iir); 217252894874SVille Syrjälä 21731ae3c34cSVille Syrjälä if (hotplug_status) 217491d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 21752ecb8ca4SVille Syrjälä 217691d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 21771e1cace9SVille Syrjälä } while (0); 21787e231dbeSJesse Barnes 21791f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 21801f814dacSImre Deak 21817e231dbeSJesse Barnes return ret; 21827e231dbeSJesse Barnes } 21837e231dbeSJesse Barnes 218443f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg) 218543f328d7SVille Syrjälä { 218645a83f84SDaniel Vetter struct drm_device *dev = arg; 2187fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 218843f328d7SVille Syrjälä irqreturn_t ret = IRQ_NONE; 218943f328d7SVille Syrjälä 21902dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21912dd2a883SImre Deak return IRQ_NONE; 21922dd2a883SImre Deak 21931f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21941f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21951f814dacSImre Deak 2196579de73bSChris Wilson do { 21976e814800SVille Syrjälä u32 master_ctl, iir; 21982ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21991ae3c34cSVille Syrjälä u32 hotplug_status = 0; 2200f0fd96f5SChris Wilson u32 gt_iir[4]; 2201a5e485a9SVille Syrjälä u32 ier = 0; 2202a5e485a9SVille Syrjälä 22038e5fd599SVille Syrjälä master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 22043278f67fSVille Syrjälä iir = I915_READ(VLV_IIR); 22053278f67fSVille Syrjälä 22063278f67fSVille Syrjälä if (master_ctl == 0 && iir == 0) 22078e5fd599SVille Syrjälä break; 220843f328d7SVille Syrjälä 220927b6c122SOscar Mateo ret = IRQ_HANDLED; 221027b6c122SOscar Mateo 2211a5e485a9SVille Syrjälä /* 2212a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2213a5e485a9SVille Syrjälä * 2214a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2215a5e485a9SVille Syrjälä * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2216a5e485a9SVille Syrjälä * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2217a5e485a9SVille Syrjälä * 2218a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2219a5e485a9SVille Syrjälä * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2220a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2221a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2222a5e485a9SVille Syrjälä * bits this time around. 
2223a5e485a9SVille Syrjälä */ 222443f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 2225a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2226a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 222743f328d7SVille Syrjälä 2228e30e251aSVille Syrjälä gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 222927b6c122SOscar Mateo 223027b6c122SOscar Mateo if (iir & I915_DISPLAY_PORT_INTERRUPT) 22311ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 223243f328d7SVille Syrjälä 223327b6c122SOscar Mateo /* Call regardless, as some status bits might not be 223427b6c122SOscar Mateo * signalled in iir */ 2235eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 223643f328d7SVille Syrjälä 2237eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2238eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT | 2239eef57324SJerome Anand I915_LPE_PIPE_C_INTERRUPT)) 2240eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2241eef57324SJerome Anand 22427ce4d1f2SVille Syrjälä /* 22437ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 22447ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 22457ce4d1f2SVille Syrjälä */ 22467ce4d1f2SVille Syrjälä if (iir) 22477ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 22487ce4d1f2SVille Syrjälä 2249a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 2250e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 22511ae3c34cSVille Syrjälä 2252f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2253e30e251aSVille Syrjälä 22541ae3c34cSVille Syrjälä if (hotplug_status) 225591d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 22562ecb8ca4SVille Syrjälä 225791d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2258579de73bSChris Wilson } while (0); 22593278f67fSVille Syrjälä 22601f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 22611f814dacSImre Deak 226243f328d7SVille Syrjälä return ret; 226343f328d7SVille Syrjälä } 226443f328d7SVille Syrjälä 226591d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 226691d14251STvrtko Ursulin u32 hotplug_trigger, 226740e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2268776ad806SJesse Barnes { 226942db67d6SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2270776ad806SJesse Barnes 22716a39d7c9SJani Nikula /* 22726a39d7c9SJani Nikula * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 22736a39d7c9SJani Nikula * unless we touch the hotplug register, even if hotplug_trigger is 22746a39d7c9SJani Nikula * zero. Not acking leads to "The master control interrupt lied (SDE)!" 22756a39d7c9SJani Nikula * errors. 
22766a39d7c9SJani Nikula */ 227713cf5504SDave Airlie dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 22786a39d7c9SJani Nikula if (!hotplug_trigger) { 22796a39d7c9SJani Nikula u32 mask = PORTA_HOTPLUG_STATUS_MASK | 22806a39d7c9SJani Nikula PORTD_HOTPLUG_STATUS_MASK | 22816a39d7c9SJani Nikula PORTC_HOTPLUG_STATUS_MASK | 22826a39d7c9SJani Nikula PORTB_HOTPLUG_STATUS_MASK; 22836a39d7c9SJani Nikula dig_hotplug_reg &= ~mask; 22846a39d7c9SJani Nikula } 22856a39d7c9SJani Nikula 228613cf5504SDave Airlie I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 22876a39d7c9SJani Nikula if (!hotplug_trigger) 22886a39d7c9SJani Nikula return; 228913cf5504SDave Airlie 2290cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 229140e56410SVille Syrjälä dig_hotplug_reg, hpd, 2292fd63e2a9SImre Deak pch_port_hotplug_long_detect); 229340e56410SVille Syrjälä 229491d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2295aaf5ec2eSSonika Jindal } 229691d131d2SDaniel Vetter 229791d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 229840e56410SVille Syrjälä { 229940e56410SVille Syrjälä int pipe; 230040e56410SVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 230140e56410SVille Syrjälä 230291d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 230340e56410SVille Syrjälä 2304cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 2305cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2306776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 2307cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2308cfc33bf7SVille Syrjälä port_name(port)); 2309cfc33bf7SVille Syrjälä } 2310776ad806SJesse Barnes 2311ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 231291d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2313ce99c256SDaniel Vetter 2314776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 231591d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2316776ad806SJesse Barnes 2317776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 2318776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2319776ad806SJesse Barnes 2320776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 2321776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2322776ad806SJesse Barnes 2323776ad806SJesse Barnes if (pch_iir & SDE_POISON) 2324776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 2325776ad806SJesse Barnes 23269db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 2327055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 23289db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 23299db4a9c7SJesse Barnes pipe_name(pipe), 23309db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 2331776ad806SJesse Barnes 2332776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2333776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2334776ad806SJesse Barnes 2335776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2336776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2337776ad806SJesse Barnes 2338776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2339a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 23408664281bSPaulo Zanoni 23418664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2342a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 
23438664281bSPaulo Zanoni } 23448664281bSPaulo Zanoni 234591d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 23468664281bSPaulo Zanoni { 23478664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 23485a69b89fSDaniel Vetter enum pipe pipe; 23498664281bSPaulo Zanoni 2350de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 2351de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2352de032bf4SPaulo Zanoni 2353055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 23541f7247c0SDaniel Vetter if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 23551f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 23568664281bSPaulo Zanoni 23575a69b89fSDaniel Vetter if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 235891d14251STvrtko Ursulin if (IS_IVYBRIDGE(dev_priv)) 235991d14251STvrtko Ursulin ivb_pipe_crc_irq_handler(dev_priv, pipe); 23605a69b89fSDaniel Vetter else 236191d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 23625a69b89fSDaniel Vetter } 23635a69b89fSDaniel Vetter } 23648bf1e9f1SShuang He 23658664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 23668664281bSPaulo Zanoni } 23678664281bSPaulo Zanoni 236891d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 23698664281bSPaulo Zanoni { 23708664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 237145c1cd87SMika Kahola enum pipe pipe; 23728664281bSPaulo Zanoni 2373de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 2374de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 2375de032bf4SPaulo Zanoni 237645c1cd87SMika Kahola for_each_pipe(dev_priv, pipe) 237745c1cd87SMika Kahola if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 237845c1cd87SMika Kahola intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 23798664281bSPaulo Zanoni 23808664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 2381776ad806SJesse Barnes } 2382776ad806SJesse Barnes 238391d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 238423e81d69SAdam Jackson { 238523e81d69SAdam Jackson int pipe; 23866dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2387aaf5ec2eSSonika Jindal 238891d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 238991d131d2SDaniel Vetter 2390cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2391cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 239223e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 2393cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2394cfc33bf7SVille Syrjälä port_name(port)); 2395cfc33bf7SVille Syrjälä } 239623e81d69SAdam Jackson 239723e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 239891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 239923e81d69SAdam Jackson 240023e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 240191d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 240223e81d69SAdam Jackson 240323e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 240423e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 240523e81d69SAdam Jackson 240623e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 240723e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 240823e81d69SAdam Jackson 240923e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 2410055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 241123e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 241223e81d69SAdam Jackson pipe_name(pipe), 
241323e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 24148664281bSPaulo Zanoni 24158664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 241691d14251STvrtko Ursulin cpt_serr_int_handler(dev_priv); 241723e81d69SAdam Jackson } 241823e81d69SAdam Jackson 241931604222SAnusha Srivatsa static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 242031604222SAnusha Srivatsa { 242131604222SAnusha Srivatsa u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 242231604222SAnusha Srivatsa u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 242331604222SAnusha Srivatsa u32 pin_mask = 0, long_mask = 0; 242431604222SAnusha Srivatsa 242531604222SAnusha Srivatsa if (ddi_hotplug_trigger) { 242631604222SAnusha Srivatsa u32 dig_hotplug_reg; 242731604222SAnusha Srivatsa 242831604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 242931604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 243031604222SAnusha Srivatsa 243131604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 243231604222SAnusha Srivatsa ddi_hotplug_trigger, 243331604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 243431604222SAnusha Srivatsa icp_ddi_port_hotplug_long_detect); 243531604222SAnusha Srivatsa } 243631604222SAnusha Srivatsa 243731604222SAnusha Srivatsa if (tc_hotplug_trigger) { 243831604222SAnusha Srivatsa u32 dig_hotplug_reg; 243931604222SAnusha Srivatsa 244031604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 244131604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 244231604222SAnusha Srivatsa 244331604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 244431604222SAnusha Srivatsa tc_hotplug_trigger, 244531604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 244631604222SAnusha Srivatsa icp_tc_port_hotplug_long_detect); 244731604222SAnusha Srivatsa } 244831604222SAnusha Srivatsa 244931604222SAnusha Srivatsa if (pin_mask) 245031604222SAnusha Srivatsa intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 245131604222SAnusha Srivatsa 245231604222SAnusha Srivatsa if (pch_iir & SDE_GMBUS_ICP) 245331604222SAnusha Srivatsa gmbus_irq_handler(dev_priv); 245431604222SAnusha Srivatsa } 245531604222SAnusha Srivatsa 245691d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 24576dbf30ceSVille Syrjälä { 24586dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 24596dbf30ceSVille Syrjälä ~SDE_PORTE_HOTPLUG_SPT; 24606dbf30ceSVille Syrjälä u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 24616dbf30ceSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 24626dbf30ceSVille Syrjälä 24636dbf30ceSVille Syrjälä if (hotplug_trigger) { 24646dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24656dbf30ceSVille Syrjälä 24666dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 24676dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 24686dbf30ceSVille Syrjälä 2469cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2470cf53902fSRodrigo Vivi hotplug_trigger, dig_hotplug_reg, hpd_spt, 247174c0b395SVille Syrjälä spt_port_hotplug_long_detect); 24726dbf30ceSVille Syrjälä } 24736dbf30ceSVille Syrjälä 24746dbf30ceSVille Syrjälä if (hotplug2_trigger) { 24756dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24766dbf30ceSVille Syrjälä 24776dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 24786dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 24796dbf30ceSVille Syrjälä 2480cf53902fSRodrigo Vivi 
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2481cf53902fSRodrigo Vivi hotplug2_trigger, dig_hotplug_reg, hpd_spt, 24826dbf30ceSVille Syrjälä spt_port_hotplug2_long_detect); 24836dbf30ceSVille Syrjälä } 24846dbf30ceSVille Syrjälä 24856dbf30ceSVille Syrjälä if (pin_mask) 248691d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 24876dbf30ceSVille Syrjälä 24886dbf30ceSVille Syrjälä if (pch_iir & SDE_GMBUS_CPT) 248991d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 24906dbf30ceSVille Syrjälä } 24916dbf30ceSVille Syrjälä 249291d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 249391d14251STvrtko Ursulin u32 hotplug_trigger, 249440e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2495c008bc6eSPaulo Zanoni { 2496e4ce95aaSVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2497e4ce95aaSVille Syrjälä 2498e4ce95aaSVille Syrjälä dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2499e4ce95aaSVille Syrjälä I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2500e4ce95aaSVille Syrjälä 2501cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 250240e56410SVille Syrjälä dig_hotplug_reg, hpd, 2503e4ce95aaSVille Syrjälä ilk_port_hotplug_long_detect); 250440e56410SVille Syrjälä 250591d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2506e4ce95aaSVille Syrjälä } 2507c008bc6eSPaulo Zanoni 250891d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 250991d14251STvrtko Ursulin u32 de_iir) 251040e56410SVille Syrjälä { 251140e56410SVille Syrjälä enum pipe pipe; 251240e56410SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 251340e56410SVille Syrjälä 251440e56410SVille Syrjälä if (hotplug_trigger) 251591d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 251640e56410SVille Syrjälä 2517c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 251891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2519c008bc6eSPaulo Zanoni 2520c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 252191d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2522c008bc6eSPaulo Zanoni 2523c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 2524c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2525c008bc6eSPaulo Zanoni 2526055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2527fd3a4024SDaniel Vetter if (de_iir & DE_PIPE_VBLANK(pipe)) 2528fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2529c008bc6eSPaulo Zanoni 253040da17c2SDaniel Vetter if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 25311f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2532c008bc6eSPaulo Zanoni 253340da17c2SDaniel Vetter if (de_iir & DE_PIPE_CRC_DONE(pipe)) 253491d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2535c008bc6eSPaulo Zanoni } 2536c008bc6eSPaulo Zanoni 2537c008bc6eSPaulo Zanoni /* check event from PCH */ 2538c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 2539c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 2540c008bc6eSPaulo Zanoni 254191d14251STvrtko Ursulin if (HAS_PCH_CPT(dev_priv)) 254291d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 2543c008bc6eSPaulo Zanoni else 254491d14251STvrtko Ursulin ibx_irq_handler(dev_priv, pch_iir); 2545c008bc6eSPaulo Zanoni 2546c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 2547c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 2548c008bc6eSPaulo Zanoni } 2549c008bc6eSPaulo Zanoni 
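/* Descriptive note added here for clarity: the check below only fires on gen5 (Ironlake), where DE_PCU_EVENT drives the ironlake_rps_change_irq_handler() GPU frequency-change handling. */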
2550*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) 255191d14251STvrtko Ursulin ironlake_rps_change_irq_handler(dev_priv); 2552c008bc6eSPaulo Zanoni } 2553c008bc6eSPaulo Zanoni 255491d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 255591d14251STvrtko Ursulin u32 de_iir) 25569719fb98SPaulo Zanoni { 255707d27e20SDamien Lespiau enum pipe pipe; 255823bb4cb5SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 255923bb4cb5SVille Syrjälä 256040e56410SVille Syrjälä if (hotplug_trigger) 256191d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 25629719fb98SPaulo Zanoni 25639719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 256491d14251STvrtko Ursulin ivb_err_int_handler(dev_priv); 25659719fb98SPaulo Zanoni 256654fd3149SDhinakaran Pandiyan if (de_iir & DE_EDP_PSR_INT_HSW) { 256754fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 256854fd3149SDhinakaran Pandiyan 256954fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 257054fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 257154fd3149SDhinakaran Pandiyan } 2572fc340442SDaniel Vetter 25739719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 257491d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 25759719fb98SPaulo Zanoni 25769719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 257791d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 25789719fb98SPaulo Zanoni 2579055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2580fd3a4024SDaniel Vetter if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2581fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 25829719fb98SPaulo Zanoni } 25839719fb98SPaulo Zanoni 25849719fb98SPaulo Zanoni /* check event from PCH */ 258591d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 25869719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 25879719fb98SPaulo Zanoni 258891d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 25899719fb98SPaulo Zanoni 25909719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 25919719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 25929719fb98SPaulo Zanoni } 25939719fb98SPaulo Zanoni } 25949719fb98SPaulo Zanoni 259572c90f62SOscar Mateo /* 259672c90f62SOscar Mateo * To handle irqs with the minimum potential races with fresh interrupts, we: 259772c90f62SOscar Mateo * 1 - Disable Master Interrupt Control. 259872c90f62SOscar Mateo * 2 - Find the source(s) of the interrupt. 259972c90f62SOscar Mateo * 3 - Clear the Interrupt Identity bits (IIR). 260072c90f62SOscar Mateo * 4 - Process the interrupt(s) that had bits set in the IIRs. 260172c90f62SOscar Mateo * 5 - Re-enable Master Interrupt Control. 
260272c90f62SOscar Mateo */ 2603f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2604b1f14ad0SJesse Barnes { 260545a83f84SDaniel Vetter struct drm_device *dev = arg; 2606fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2607f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 26080e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 2609b1f14ad0SJesse Barnes 26102dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 26112dd2a883SImre Deak return IRQ_NONE; 26122dd2a883SImre Deak 26131f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26141f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 26151f814dacSImre Deak 2616b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 2617b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 2618b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 26190e43406bSChris Wilson 262044498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 262144498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 262244498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 262344498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 262444498aeaSPaulo Zanoni * due to its back queue). */ 262591d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv)) { 262644498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 262744498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 2628ab5c608bSBen Widawsky } 262944498aeaSPaulo Zanoni 263072c90f62SOscar Mateo /* Find, clear, then process each source of interrupt */ 263172c90f62SOscar Mateo 26320e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 26330e43406bSChris Wilson if (gt_iir) { 263472c90f62SOscar Mateo I915_WRITE(GTIIR, gt_iir); 263572c90f62SOscar Mateo ret = IRQ_HANDLED; 263691d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 2637261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 2638d8fc8a47SPaulo Zanoni else 2639261e40b8SVille Syrjälä ilk_gt_irq_handler(dev_priv, gt_iir); 26400e43406bSChris Wilson } 2641b1f14ad0SJesse Barnes 2642b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 26430e43406bSChris Wilson if (de_iir) { 264472c90f62SOscar Mateo I915_WRITE(DEIIR, de_iir); 264572c90f62SOscar Mateo ret = IRQ_HANDLED; 264691d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) 264791d14251STvrtko Ursulin ivb_display_irq_handler(dev_priv, de_iir); 2648f1af8fc1SPaulo Zanoni else 264991d14251STvrtko Ursulin ilk_display_irq_handler(dev_priv, de_iir); 26500e43406bSChris Wilson } 26510e43406bSChris Wilson 265291d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 2653f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 26540e43406bSChris Wilson if (pm_iir) { 2655b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 26560e43406bSChris Wilson ret = IRQ_HANDLED; 265772c90f62SOscar Mateo gen6_rps_irq_handler(dev_priv, pm_iir); 26580e43406bSChris Wilson } 2659f1af8fc1SPaulo Zanoni } 2660b1f14ad0SJesse Barnes 2661b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 266274093f3eSChris Wilson if (!HAS_PCH_NOP(dev_priv)) 266344498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 2664b1f14ad0SJesse Barnes 26651f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26661f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 26671f814dacSImre Deak 2668b1f14ad0SJesse Barnes return ret; 2669b1f14ad0SJesse Barnes } 2670b1f14ad0SJesse Barnes 267191d14251STvrtko
Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 267291d14251STvrtko Ursulin u32 hotplug_trigger, 267340e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2674d04a492dSShashank Sharma { 2675cebd87a0SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2676d04a492dSShashank Sharma 2677a52bb15bSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2678a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2679d04a492dSShashank Sharma 2680cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 268140e56410SVille Syrjälä dig_hotplug_reg, hpd, 2682cebd87a0SVille Syrjälä bxt_port_hotplug_long_detect); 268340e56410SVille Syrjälä 268491d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2685d04a492dSShashank Sharma } 2686d04a492dSShashank Sharma 2687121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2688121e758eSDhinakaran Pandiyan { 2689121e758eSDhinakaran Pandiyan u32 pin_mask = 0, long_mask = 0; 2690b796b971SDhinakaran Pandiyan u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2691b796b971SDhinakaran Pandiyan u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2692121e758eSDhinakaran Pandiyan 2693121e758eSDhinakaran Pandiyan if (trigger_tc) { 2694b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2695b796b971SDhinakaran Pandiyan 2696121e758eSDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2697121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2698121e758eSDhinakaran Pandiyan 2699121e758eSDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2700b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2701121e758eSDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2702121e758eSDhinakaran Pandiyan } 2703b796b971SDhinakaran Pandiyan 2704b796b971SDhinakaran Pandiyan if (trigger_tbt) { 2705b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2706b796b971SDhinakaran Pandiyan 2707b796b971SDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2708b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2709b796b971SDhinakaran Pandiyan 2710b796b971SDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2711b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2712b796b971SDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2713b796b971SDhinakaran Pandiyan } 2714b796b971SDhinakaran Pandiyan 2715b796b971SDhinakaran Pandiyan if (pin_mask) 2716b796b971SDhinakaran Pandiyan intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2717b796b971SDhinakaran Pandiyan else 2718b796b971SDhinakaran Pandiyan DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2719121e758eSDhinakaran Pandiyan } 2720121e758eSDhinakaran Pandiyan 2721f11a0f46STvrtko Ursulin static irqreturn_t 2722f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2723abd58f01SBen Widawsky { 2724abd58f01SBen Widawsky irqreturn_t ret = IRQ_NONE; 2725f11a0f46STvrtko Ursulin u32 iir; 2726c42664ccSDaniel Vetter enum pipe pipe; 272788e04703SJesse Barnes 2728abd58f01SBen Widawsky if (master_ctl & GEN8_DE_MISC_IRQ) { 2729e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_MISC_IIR); 2730e32192e1STvrtko Ursulin if (iir) { 2731e04f7eceSVille Syrjälä bool found = false; 2732e04f7eceSVille Syrjälä 2733e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_MISC_IIR, iir); 2734abd58f01SBen Widawsky ret = IRQ_HANDLED; 
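/* Descriptive note added here for clarity: the checks below decode the individual DE MISC sources; GEN8_DE_MISC_GSE is the opregion ASLE notification and GEN8_DE_EDP_PSR the eDP PSR interrupt, while any other bit is reported as an unexpected DE Misc interrupt. */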
2735e04f7eceSVille Syrjälä 2736e04f7eceSVille Syrjälä if (iir & GEN8_DE_MISC_GSE) { 273791d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2738e04f7eceSVille Syrjälä found = true; 2739e04f7eceSVille Syrjälä } 2740e04f7eceSVille Syrjälä 2741e04f7eceSVille Syrjälä if (iir & GEN8_DE_EDP_PSR) { 274254fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 274354fd3149SDhinakaran Pandiyan 274454fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 274554fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 2746e04f7eceSVille Syrjälä found = true; 2747e04f7eceSVille Syrjälä } 2748e04f7eceSVille Syrjälä 2749e04f7eceSVille Syrjälä if (!found) 275038cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Misc interrupt\n"); 2751abd58f01SBen Widawsky } 275238cc46d7SOscar Mateo else 275338cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2754abd58f01SBen Widawsky } 2755abd58f01SBen Widawsky 2756121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2757121e758eSDhinakaran Pandiyan iir = I915_READ(GEN11_DE_HPD_IIR); 2758121e758eSDhinakaran Pandiyan if (iir) { 2759121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IIR, iir); 2760121e758eSDhinakaran Pandiyan ret = IRQ_HANDLED; 2761121e758eSDhinakaran Pandiyan gen11_hpd_irq_handler(dev_priv, iir); 2762121e758eSDhinakaran Pandiyan } else { 2763121e758eSDhinakaran Pandiyan DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2764121e758eSDhinakaran Pandiyan } 2765121e758eSDhinakaran Pandiyan } 2766121e758eSDhinakaran Pandiyan 27676d766f02SDaniel Vetter if (master_ctl & GEN8_DE_PORT_IRQ) { 2768e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PORT_IIR); 2769e32192e1STvrtko Ursulin if (iir) { 2770e32192e1STvrtko Ursulin u32 tmp_mask; 2771d04a492dSShashank Sharma bool found = false; 2772cebd87a0SVille Syrjälä 2773e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PORT_IIR, iir); 27746d766f02SDaniel Vetter ret = IRQ_HANDLED; 277588e04703SJesse Barnes 2776e32192e1STvrtko Ursulin tmp_mask = GEN8_AUX_CHANNEL_A; 2777bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2778e32192e1STvrtko Ursulin tmp_mask |= GEN9_AUX_CHANNEL_B | 2779e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_C | 2780e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_D; 2781e32192e1STvrtko Ursulin 2782bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 2783bb187e93SJames Ausmus tmp_mask |= ICL_AUX_CHANNEL_E; 2784bb187e93SJames Ausmus 27859bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || 27869bb635d9SDhinakaran Pandiyan INTEL_GEN(dev_priv) >= 11) 2787a324fcacSRodrigo Vivi tmp_mask |= CNL_AUX_CHANNEL_F; 2788a324fcacSRodrigo Vivi 2789e32192e1STvrtko Ursulin if (iir & tmp_mask) { 279091d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2791d04a492dSShashank Sharma found = true; 2792d04a492dSShashank Sharma } 2793d04a492dSShashank Sharma 2794cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) { 2795e32192e1STvrtko Ursulin tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2796e32192e1STvrtko Ursulin if (tmp_mask) { 279791d14251STvrtko Ursulin bxt_hpd_irq_handler(dev_priv, tmp_mask, 279891d14251STvrtko Ursulin hpd_bxt); 2799d04a492dSShashank Sharma found = true; 2800d04a492dSShashank Sharma } 2801e32192e1STvrtko Ursulin } else if (IS_BROADWELL(dev_priv)) { 2802e32192e1STvrtko Ursulin tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2803e32192e1STvrtko Ursulin if (tmp_mask) { 280491d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, 280591d14251STvrtko Ursulin tmp_mask, hpd_bdw); 2806e32192e1STvrtko 
Ursulin found = true; 2807e32192e1STvrtko Ursulin } 2808e32192e1STvrtko Ursulin } 2809d04a492dSShashank Sharma 2810cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 281191d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 28129e63743eSShashank Sharma found = true; 28139e63743eSShashank Sharma } 28149e63743eSShashank Sharma 2815d04a492dSShashank Sharma if (!found) 281638cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Port interrupt\n"); 28176d766f02SDaniel Vetter } 281838cc46d7SOscar Mateo else 281938cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 28206d766f02SDaniel Vetter } 28216d766f02SDaniel Vetter 2822055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2823fd3a4024SDaniel Vetter u32 fault_errors; 2824abd58f01SBen Widawsky 2825c42664ccSDaniel Vetter if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2826c42664ccSDaniel Vetter continue; 2827c42664ccSDaniel Vetter 2828e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2829e32192e1STvrtko Ursulin if (!iir) { 2830e32192e1STvrtko Ursulin DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2831e32192e1STvrtko Ursulin continue; 2832e32192e1STvrtko Ursulin } 2833770de83dSDamien Lespiau 2834e32192e1STvrtko Ursulin ret = IRQ_HANDLED; 2835e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2836e32192e1STvrtko Ursulin 2837fd3a4024SDaniel Vetter if (iir & GEN8_PIPE_VBLANK) 2838fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2839abd58f01SBen Widawsky 2840e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 284191d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 28420fbe7870SDaniel Vetter 2843e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2844e32192e1STvrtko Ursulin intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 284538d83c96SDaniel Vetter 2846e32192e1STvrtko Ursulin fault_errors = iir; 2847bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2848e32192e1STvrtko Ursulin fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2849770de83dSDamien Lespiau else 2850e32192e1STvrtko Ursulin fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2851770de83dSDamien Lespiau 2852770de83dSDamien Lespiau if (fault_errors) 28531353ec38STvrtko Ursulin DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 285430100f2bSDaniel Vetter pipe_name(pipe), 2855e32192e1STvrtko Ursulin fault_errors); 2856abd58f01SBen Widawsky } 2857abd58f01SBen Widawsky 285891d14251STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2859266ea3d9SShashank Sharma master_ctl & GEN8_DE_PCH_IRQ) { 286092d03a80SDaniel Vetter /* 286192d03a80SDaniel Vetter * FIXME(BDW): Assume for now that the new interrupt handling 286292d03a80SDaniel Vetter * scheme also closed the SDE interrupt handling race we've seen 286392d03a80SDaniel Vetter * on older pch-split platforms. But this needs testing. 
286492d03a80SDaniel Vetter */ 2865e32192e1STvrtko Ursulin iir = I915_READ(SDEIIR); 2866e32192e1STvrtko Ursulin if (iir) { 2867e32192e1STvrtko Ursulin I915_WRITE(SDEIIR, iir); 286892d03a80SDaniel Vetter ret = IRQ_HANDLED; 28696dbf30ceSVille Syrjälä 287031604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 287131604222SAnusha Srivatsa icp_irq_handler(dev_priv, iir); 287231604222SAnusha Srivatsa else if (HAS_PCH_SPT(dev_priv) || 287331604222SAnusha Srivatsa HAS_PCH_KBP(dev_priv) || 28747b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 287591d14251STvrtko Ursulin spt_irq_handler(dev_priv, iir); 28766dbf30ceSVille Syrjälä else 287791d14251STvrtko Ursulin cpt_irq_handler(dev_priv, iir); 28782dfb0b81SJani Nikula } else { 28792dfb0b81SJani Nikula /* 28802dfb0b81SJani Nikula * Like on previous PCH there seems to be something 28812dfb0b81SJani Nikula * fishy going on with forwarding PCH interrupts. 28822dfb0b81SJani Nikula */ 28832dfb0b81SJani Nikula DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 28842dfb0b81SJani Nikula } 288592d03a80SDaniel Vetter } 288692d03a80SDaniel Vetter 2887f11a0f46STvrtko Ursulin return ret; 2888f11a0f46STvrtko Ursulin } 2889f11a0f46STvrtko Ursulin 28904376b9c9SMika Kuoppala static inline u32 gen8_master_intr_disable(void __iomem * const regs) 28914376b9c9SMika Kuoppala { 28924376b9c9SMika Kuoppala raw_reg_write(regs, GEN8_MASTER_IRQ, 0); 28934376b9c9SMika Kuoppala 28944376b9c9SMika Kuoppala /* 28954376b9c9SMika Kuoppala * Now with master disabled, get a sample of level indications 28964376b9c9SMika Kuoppala * for this interrupt. Indications will be cleared on related acks. 28974376b9c9SMika Kuoppala * New indications can and will light up during processing, 28984376b9c9SMika Kuoppala * and will generate new interrupt after enabling master. 
28994376b9c9SMika Kuoppala */ 29004376b9c9SMika Kuoppala return raw_reg_read(regs, GEN8_MASTER_IRQ); 29014376b9c9SMika Kuoppala } 29024376b9c9SMika Kuoppala 29034376b9c9SMika Kuoppala static inline void gen8_master_intr_enable(void __iomem * const regs) 29044376b9c9SMika Kuoppala { 29054376b9c9SMika Kuoppala raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 29064376b9c9SMika Kuoppala } 29074376b9c9SMika Kuoppala 2908f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg) 2909f11a0f46STvrtko Ursulin { 2910f0fd96f5SChris Wilson struct drm_i915_private *dev_priv = to_i915(arg); 29114376b9c9SMika Kuoppala void __iomem * const regs = dev_priv->regs; 2912f11a0f46STvrtko Ursulin u32 master_ctl; 2913f0fd96f5SChris Wilson u32 gt_iir[4]; 2914f11a0f46STvrtko Ursulin 2915f11a0f46STvrtko Ursulin if (!intel_irqs_enabled(dev_priv)) 2916f11a0f46STvrtko Ursulin return IRQ_NONE; 2917f11a0f46STvrtko Ursulin 29184376b9c9SMika Kuoppala master_ctl = gen8_master_intr_disable(regs); 29194376b9c9SMika Kuoppala if (!master_ctl) { 29204376b9c9SMika Kuoppala gen8_master_intr_enable(regs); 2921f11a0f46STvrtko Ursulin return IRQ_NONE; 29224376b9c9SMika Kuoppala } 2923f11a0f46STvrtko Ursulin 2924f11a0f46STvrtko Ursulin /* Find, clear, then process each source of interrupt */ 292555ef72f2SChris Wilson gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2926f0fd96f5SChris Wilson 2927f0fd96f5SChris Wilson /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2928f0fd96f5SChris Wilson if (master_ctl & ~GEN8_GT_IRQS) { 2929f0fd96f5SChris Wilson disable_rpm_wakeref_asserts(dev_priv); 293055ef72f2SChris Wilson gen8_de_irq_handler(dev_priv, master_ctl); 2931f0fd96f5SChris Wilson enable_rpm_wakeref_asserts(dev_priv); 2932f0fd96f5SChris Wilson } 2933f11a0f46STvrtko Ursulin 29344376b9c9SMika Kuoppala gen8_master_intr_enable(regs); 2935abd58f01SBen Widawsky 2936f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 29371f814dacSImre Deak 293855ef72f2SChris Wilson return IRQ_HANDLED; 2939abd58f01SBen Widawsky } 2940abd58f01SBen Widawsky 294136703e79SChris Wilson struct wedge_me { 294236703e79SChris Wilson struct delayed_work work; 294336703e79SChris Wilson struct drm_i915_private *i915; 294436703e79SChris Wilson const char *name; 294536703e79SChris Wilson }; 294636703e79SChris Wilson 294736703e79SChris Wilson static void wedge_me(struct work_struct *work) 294836703e79SChris Wilson { 294936703e79SChris Wilson struct wedge_me *w = container_of(work, typeof(*w), work.work); 295036703e79SChris Wilson 295136703e79SChris Wilson dev_err(w->i915->drm.dev, 295236703e79SChris Wilson "%s timed out, cancelling all in-flight rendering.\n", 295336703e79SChris Wilson w->name); 295436703e79SChris Wilson i915_gem_set_wedged(w->i915); 295536703e79SChris Wilson } 295636703e79SChris Wilson 295736703e79SChris Wilson static void __init_wedge(struct wedge_me *w, 295836703e79SChris Wilson struct drm_i915_private *i915, 295936703e79SChris Wilson long timeout, 296036703e79SChris Wilson const char *name) 296136703e79SChris Wilson { 296236703e79SChris Wilson w->i915 = i915; 296336703e79SChris Wilson w->name = name; 296436703e79SChris Wilson 296536703e79SChris Wilson INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 296636703e79SChris Wilson schedule_delayed_work(&w->work, timeout); 296736703e79SChris Wilson } 296836703e79SChris Wilson 296936703e79SChris Wilson static void __fini_wedge(struct wedge_me *w) 297036703e79SChris Wilson { 297136703e79SChris Wilson cancel_delayed_work_sync(&w->work); 
297236703e79SChris Wilson destroy_delayed_work_on_stack(&w->work); 297336703e79SChris Wilson w->i915 = NULL; 297436703e79SChris Wilson } 297536703e79SChris Wilson 297636703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 297736703e79SChris Wilson for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 297836703e79SChris Wilson (W)->i915; \ 297936703e79SChris Wilson __fini_wedge((W))) 298036703e79SChris Wilson 298151951ae7SMika Kuoppala static u32 2982f744dbc2SMika Kuoppala gen11_gt_engine_identity(struct drm_i915_private * const i915, 298351951ae7SMika Kuoppala const unsigned int bank, const unsigned int bit) 298451951ae7SMika Kuoppala { 298551951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 298651951ae7SMika Kuoppala u32 timeout_ts; 298751951ae7SMika Kuoppala u32 ident; 298851951ae7SMika Kuoppala 298996606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 299096606f3bSOscar Mateo 299151951ae7SMika Kuoppala raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 299251951ae7SMika Kuoppala 299351951ae7SMika Kuoppala /* 299451951ae7SMika Kuoppala * NB: Specs do not specify how long to spin wait, 299551951ae7SMika Kuoppala * so we do ~100us as an educated guess. 299651951ae7SMika Kuoppala */ 299751951ae7SMika Kuoppala timeout_ts = (local_clock() >> 10) + 100; 299851951ae7SMika Kuoppala do { 299951951ae7SMika Kuoppala ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 300051951ae7SMika Kuoppala } while (!(ident & GEN11_INTR_DATA_VALID) && 300151951ae7SMika Kuoppala !time_after32(local_clock() >> 10, timeout_ts)); 300251951ae7SMika Kuoppala 300351951ae7SMika Kuoppala if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 300451951ae7SMika Kuoppala DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 300551951ae7SMika Kuoppala bank, bit, ident); 300651951ae7SMika Kuoppala return 0; 300751951ae7SMika Kuoppala } 300851951ae7SMika Kuoppala 300951951ae7SMika Kuoppala raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 301051951ae7SMika Kuoppala GEN11_INTR_DATA_VALID); 301151951ae7SMika Kuoppala 3012f744dbc2SMika Kuoppala return ident; 3013f744dbc2SMika Kuoppala } 3014f744dbc2SMika Kuoppala 3015f744dbc2SMika Kuoppala static void 3016f744dbc2SMika Kuoppala gen11_other_irq_handler(struct drm_i915_private * const i915, 3017f744dbc2SMika Kuoppala const u8 instance, const u16 iir) 3018f744dbc2SMika Kuoppala { 3019d02b98b8SOscar Mateo if (instance == OTHER_GTPM_INSTANCE) 3020d02b98b8SOscar Mateo return gen6_rps_irq_handler(i915, iir); 3021d02b98b8SOscar Mateo 3022f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3023f744dbc2SMika Kuoppala instance, iir); 3024f744dbc2SMika Kuoppala } 3025f744dbc2SMika Kuoppala 3026f744dbc2SMika Kuoppala static void 3027f744dbc2SMika Kuoppala gen11_engine_irq_handler(struct drm_i915_private * const i915, 3028f744dbc2SMika Kuoppala const u8 class, const u8 instance, const u16 iir) 3029f744dbc2SMika Kuoppala { 3030f744dbc2SMika Kuoppala struct intel_engine_cs *engine; 3031f744dbc2SMika Kuoppala 3032f744dbc2SMika Kuoppala if (instance <= MAX_ENGINE_INSTANCE) 3033f744dbc2SMika Kuoppala engine = i915->engine_class[class][instance]; 3034f744dbc2SMika Kuoppala else 3035f744dbc2SMika Kuoppala engine = NULL; 3036f744dbc2SMika Kuoppala 3037f744dbc2SMika Kuoppala if (likely(engine)) 3038f744dbc2SMika Kuoppala return gen8_cs_irq_handler(engine, iir); 3039f744dbc2SMika Kuoppala 3040f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3041f744dbc2SMika Kuoppala class, 
instance); 3042f744dbc2SMika Kuoppala } 3043f744dbc2SMika Kuoppala 3044f744dbc2SMika Kuoppala static void 3045f744dbc2SMika Kuoppala gen11_gt_identity_handler(struct drm_i915_private * const i915, 3046f744dbc2SMika Kuoppala const u32 identity) 3047f744dbc2SMika Kuoppala { 3048f744dbc2SMika Kuoppala const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3049f744dbc2SMika Kuoppala const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3050f744dbc2SMika Kuoppala const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3051f744dbc2SMika Kuoppala 3052f744dbc2SMika Kuoppala if (unlikely(!intr)) 3053f744dbc2SMika Kuoppala return; 3054f744dbc2SMika Kuoppala 3055f744dbc2SMika Kuoppala if (class <= COPY_ENGINE_CLASS) 3056f744dbc2SMika Kuoppala return gen11_engine_irq_handler(i915, class, instance, intr); 3057f744dbc2SMika Kuoppala 3058f744dbc2SMika Kuoppala if (class == OTHER_CLASS) 3059f744dbc2SMika Kuoppala return gen11_other_irq_handler(i915, instance, intr); 3060f744dbc2SMika Kuoppala 3061f744dbc2SMika Kuoppala WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3062f744dbc2SMika Kuoppala class, instance, intr); 306351951ae7SMika Kuoppala } 306451951ae7SMika Kuoppala 306551951ae7SMika Kuoppala static void 306696606f3bSOscar Mateo gen11_gt_bank_handler(struct drm_i915_private * const i915, 306796606f3bSOscar Mateo const unsigned int bank) 306851951ae7SMika Kuoppala { 306951951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 307051951ae7SMika Kuoppala unsigned long intr_dw; 307151951ae7SMika Kuoppala unsigned int bit; 307251951ae7SMika Kuoppala 307396606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 307451951ae7SMika Kuoppala 307551951ae7SMika Kuoppala intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 307651951ae7SMika Kuoppala 307751951ae7SMika Kuoppala if (unlikely(!intr_dw)) { 307851951ae7SMika Kuoppala DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 307996606f3bSOscar Mateo return; 308051951ae7SMika Kuoppala } 308151951ae7SMika Kuoppala 308251951ae7SMika Kuoppala for_each_set_bit(bit, &intr_dw, 32) { 3083f744dbc2SMika Kuoppala const u32 ident = gen11_gt_engine_identity(i915, 3084f744dbc2SMika Kuoppala bank, bit); 308551951ae7SMika Kuoppala 3086f744dbc2SMika Kuoppala gen11_gt_identity_handler(i915, ident); 308751951ae7SMika Kuoppala } 308851951ae7SMika Kuoppala 308951951ae7SMika Kuoppala /* Clear must be after shared has been served for engine */ 309051951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 309151951ae7SMika Kuoppala } 309296606f3bSOscar Mateo 309396606f3bSOscar Mateo static void 309496606f3bSOscar Mateo gen11_gt_irq_handler(struct drm_i915_private * const i915, 309596606f3bSOscar Mateo const u32 master_ctl) 309696606f3bSOscar Mateo { 309796606f3bSOscar Mateo unsigned int bank; 309896606f3bSOscar Mateo 309996606f3bSOscar Mateo spin_lock(&i915->irq_lock); 310096606f3bSOscar Mateo 310196606f3bSOscar Mateo for (bank = 0; bank < 2; bank++) { 310296606f3bSOscar Mateo if (master_ctl & GEN11_GT_DW_IRQ(bank)) 310396606f3bSOscar Mateo gen11_gt_bank_handler(i915, bank); 310496606f3bSOscar Mateo } 310596606f3bSOscar Mateo 310696606f3bSOscar Mateo spin_unlock(&i915->irq_lock); 310751951ae7SMika Kuoppala } 310851951ae7SMika Kuoppala 31097a909383SChris Wilson static u32 31107a909383SChris Wilson gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3111df0d28c1SDhinakaran Pandiyan { 3112df0d28c1SDhinakaran Pandiyan void __iomem * const regs = dev_priv->regs; 31137a909383SChris Wilson u32 iir; 3114df0d28c1SDhinakaran 
Pandiyan 3115df0d28c1SDhinakaran Pandiyan if (!(master_ctl & GEN11_GU_MISC_IRQ)) 31167a909383SChris Wilson return 0; 3117df0d28c1SDhinakaran Pandiyan 31187a909383SChris Wilson iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 31197a909383SChris Wilson if (likely(iir)) 31207a909383SChris Wilson raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 31217a909383SChris Wilson 31227a909383SChris Wilson return iir; 3123df0d28c1SDhinakaran Pandiyan } 3124df0d28c1SDhinakaran Pandiyan 3125df0d28c1SDhinakaran Pandiyan static void 31267a909383SChris Wilson gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3127df0d28c1SDhinakaran Pandiyan { 3128df0d28c1SDhinakaran Pandiyan if (iir & GEN11_GU_MISC_GSE) 3129df0d28c1SDhinakaran Pandiyan intel_opregion_asle_intr(dev_priv); 3130df0d28c1SDhinakaran Pandiyan } 3131df0d28c1SDhinakaran Pandiyan 313281067b71SMika Kuoppala static inline u32 gen11_master_intr_disable(void __iomem * const regs) 313381067b71SMika Kuoppala { 313481067b71SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 313581067b71SMika Kuoppala 313681067b71SMika Kuoppala /* 313781067b71SMika Kuoppala * Now with master disabled, get a sample of level indications 313881067b71SMika Kuoppala * for this interrupt. Indications will be cleared on related acks. 313981067b71SMika Kuoppala * New indications can and will light up during processing, 314081067b71SMika Kuoppala * and will generate new interrupt after enabling master. 314181067b71SMika Kuoppala */ 314281067b71SMika Kuoppala return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 314381067b71SMika Kuoppala } 314481067b71SMika Kuoppala 314581067b71SMika Kuoppala static inline void gen11_master_intr_enable(void __iomem * const regs) 314681067b71SMika Kuoppala { 314781067b71SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 314881067b71SMika Kuoppala } 314981067b71SMika Kuoppala 315051951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg) 315151951ae7SMika Kuoppala { 315251951ae7SMika Kuoppala struct drm_i915_private * const i915 = to_i915(arg); 315351951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 315451951ae7SMika Kuoppala u32 master_ctl; 3155df0d28c1SDhinakaran Pandiyan u32 gu_misc_iir; 315651951ae7SMika Kuoppala 315751951ae7SMika Kuoppala if (!intel_irqs_enabled(i915)) 315851951ae7SMika Kuoppala return IRQ_NONE; 315951951ae7SMika Kuoppala 316081067b71SMika Kuoppala master_ctl = gen11_master_intr_disable(regs); 316181067b71SMika Kuoppala if (!master_ctl) { 316281067b71SMika Kuoppala gen11_master_intr_enable(regs); 316351951ae7SMika Kuoppala return IRQ_NONE; 316481067b71SMika Kuoppala } 316551951ae7SMika Kuoppala 316651951ae7SMika Kuoppala /* Find, clear, then process each source of interrupt. */ 316751951ae7SMika Kuoppala gen11_gt_irq_handler(i915, master_ctl); 316851951ae7SMika Kuoppala 316951951ae7SMika Kuoppala /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 317051951ae7SMika Kuoppala if (master_ctl & GEN11_DISPLAY_IRQ) { 317151951ae7SMika Kuoppala const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 317251951ae7SMika Kuoppala 317351951ae7SMika Kuoppala disable_rpm_wakeref_asserts(i915); 317451951ae7SMika Kuoppala /* 317551951ae7SMika Kuoppala * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 317651951ae7SMika Kuoppala * for the display related bits. 
317751951ae7SMika Kuoppala */ 317851951ae7SMika Kuoppala gen8_de_irq_handler(i915, disp_ctl); 317951951ae7SMika Kuoppala enable_rpm_wakeref_asserts(i915); 318051951ae7SMika Kuoppala } 318151951ae7SMika Kuoppala 31827a909383SChris Wilson gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3183df0d28c1SDhinakaran Pandiyan 318481067b71SMika Kuoppala gen11_master_intr_enable(regs); 318551951ae7SMika Kuoppala 31867a909383SChris Wilson gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3187df0d28c1SDhinakaran Pandiyan 318851951ae7SMika Kuoppala return IRQ_HANDLED; 318951951ae7SMika Kuoppala } 319051951ae7SMika Kuoppala 3191ce800754SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv, 3192d0667e9cSChris Wilson u32 engine_mask, 3193d0667e9cSChris Wilson const char *reason) 31948a905236SJesse Barnes { 3195ce800754SChris Wilson struct i915_gpu_error *error = &dev_priv->gpu_error; 319691c8a326SChris Wilson struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3197cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3198cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3199cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 320036703e79SChris Wilson struct wedge_me w; 32018a905236SJesse Barnes 3202c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 32038a905236SJesse Barnes 320444d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 3205c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 32061f83fee0SDaniel Vetter 320736703e79SChris Wilson /* Use a watchdog to ensure that our reset completes */ 320836703e79SChris Wilson i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3209c033666aSChris Wilson intel_prepare_reset(dev_priv); 32107514747dSVille Syrjälä 3211d0667e9cSChris Wilson error->reason = reason; 3212d0667e9cSChris Wilson error->stalled_mask = engine_mask; 3213ce800754SChris Wilson 321436703e79SChris Wilson /* Signal that locked waiters should reset the GPU */ 3215d0667e9cSChris Wilson smp_mb__before_atomic(); 3216ce800754SChris Wilson set_bit(I915_RESET_HANDOFF, &error->flags); 3217ce800754SChris Wilson wake_up_all(&error->wait_queue); 32188c185ecaSChris Wilson 321936703e79SChris Wilson /* Wait for anyone holding the lock to wakeup, without 322036703e79SChris Wilson * blocking indefinitely on struct_mutex. 
322117e1df07SDaniel Vetter */ 322236703e79SChris Wilson do { 3223780f262aSChris Wilson if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3224d0667e9cSChris Wilson i915_reset(dev_priv, engine_mask, reason); 3225221fe799SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 3226780f262aSChris Wilson } 3227ce800754SChris Wilson } while (wait_on_bit_timeout(&error->flags, 32288c185ecaSChris Wilson I915_RESET_HANDOFF, 3229780f262aSChris Wilson TASK_UNINTERRUPTIBLE, 323036703e79SChris Wilson 1)); 3231f69061beSDaniel Vetter 3232d0667e9cSChris Wilson error->stalled_mask = 0; 3233ce800754SChris Wilson error->reason = NULL; 3234ce800754SChris Wilson 3235c033666aSChris Wilson intel_finish_reset(dev_priv); 323636703e79SChris Wilson } 3237f454c694SImre Deak 3238ce800754SChris Wilson if (!test_bit(I915_WEDGED, &error->flags)) 3239ce800754SChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3240f316a42cSBen Gamari } 32418a905236SJesse Barnes 324209605548SLionel Landwerlin void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3243c0e09200SDave Airlie { 3244eaa14c24SChris Wilson u32 eir; 324563eeaf38SJesse Barnes 3246*cf819effSLucas De Marchi if (!IS_GEN(dev_priv, 2)) 3247eaa14c24SChris Wilson I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 324863eeaf38SJesse Barnes 3249eaa14c24SChris Wilson if (INTEL_GEN(dev_priv) < 4) 3250eaa14c24SChris Wilson I915_WRITE(IPEIR, I915_READ(IPEIR)); 3251eaa14c24SChris Wilson else 3252eaa14c24SChris Wilson I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 32538a905236SJesse Barnes 3254eaa14c24SChris Wilson I915_WRITE(EIR, I915_READ(EIR)); 325563eeaf38SJesse Barnes eir = I915_READ(EIR); 325663eeaf38SJesse Barnes if (eir) { 325763eeaf38SJesse Barnes /* 325863eeaf38SJesse Barnes * some errors might have become stuck, 325963eeaf38SJesse Barnes * mask them. 
326063eeaf38SJesse Barnes */ 3261eaa14c24SChris Wilson DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 326263eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 326378c357ddSVille Syrjälä I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 326463eeaf38SJesse Barnes } 326509605548SLionel Landwerlin 326609605548SLionel Landwerlin if (INTEL_GEN(dev_priv) >= 8) { 326709605548SLionel Landwerlin I915_WRITE(GEN8_RING_FAULT_REG, 326809605548SLionel Landwerlin I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID); 326909605548SLionel Landwerlin POSTING_READ(GEN8_RING_FAULT_REG); 327009605548SLionel Landwerlin } else if (INTEL_GEN(dev_priv) >= 6) { 327109605548SLionel Landwerlin struct intel_engine_cs *engine; 327209605548SLionel Landwerlin enum intel_engine_id id; 327309605548SLionel Landwerlin 327409605548SLionel Landwerlin for_each_engine(engine, dev_priv, id) { 327509605548SLionel Landwerlin I915_WRITE(RING_FAULT_REG(engine), 327609605548SLionel Landwerlin I915_READ(RING_FAULT_REG(engine)) & 327709605548SLionel Landwerlin ~RING_FAULT_VALID); 327809605548SLionel Landwerlin } 327909605548SLionel Landwerlin POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); 328009605548SLionel Landwerlin } 328135aed2e6SChris Wilson } 328235aed2e6SChris Wilson 328335aed2e6SChris Wilson /** 3284b8d24a06SMika Kuoppala * i915_handle_error - handle a gpu error 328514bb2c11STvrtko Ursulin * @dev_priv: i915 device private 328614b730fcSarun.siluvery@linux.intel.com * @engine_mask: mask representing engines that are hung 3287ce800754SChris Wilson * @flags: control flags 328887c390b6SMichel Thierry * @fmt: Error message format string 328987c390b6SMichel Thierry * 3290aafd8581SJavier Martinez Canillas * Do some basic checking of register state at error time and 329135aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 329235aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 329335aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 329435aed2e6SChris Wilson * of a ring dump etc.). 329535aed2e6SChris Wilson */ 3296c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv, 3297c033666aSChris Wilson u32 engine_mask, 3298ce800754SChris Wilson unsigned long flags, 329958174462SMika Kuoppala const char *fmt, ...) 330035aed2e6SChris Wilson { 3301142bc7d9SMichel Thierry struct intel_engine_cs *engine; 3302142bc7d9SMichel Thierry unsigned int tmp; 330358174462SMika Kuoppala char error_msg[80]; 3304ce800754SChris Wilson char *msg = NULL; 3305ce800754SChris Wilson 3306ce800754SChris Wilson if (fmt) { 3307ce800754SChris Wilson va_list args; 330835aed2e6SChris Wilson 330958174462SMika Kuoppala va_start(args, fmt); 331058174462SMika Kuoppala vscnprintf(error_msg, sizeof(error_msg), fmt, args); 331158174462SMika Kuoppala va_end(args); 331258174462SMika Kuoppala 3313ce800754SChris Wilson msg = error_msg; 3314ce800754SChris Wilson } 3315ce800754SChris Wilson 33161604a86dSChris Wilson /* 33171604a86dSChris Wilson * In most cases it's guaranteed that we get here with an RPM 33181604a86dSChris Wilson * reference held, for example because there is a pending GPU 33191604a86dSChris Wilson * request that won't finish until the reset is done. This 33201604a86dSChris Wilson * isn't the case at least when we get here by doing a 33211604a86dSChris Wilson * simulated reset via debugfs, so get an RPM reference. 
33221604a86dSChris Wilson */ 33231604a86dSChris Wilson intel_runtime_pm_get(dev_priv); 33241604a86dSChris Wilson 3325873d66fbSChris Wilson engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3326ce800754SChris Wilson 3327ce800754SChris Wilson if (flags & I915_ERROR_CAPTURE) { 3328ce800754SChris Wilson i915_capture_error_state(dev_priv, engine_mask, msg); 3329eaa14c24SChris Wilson i915_clear_error_registers(dev_priv); 3330ce800754SChris Wilson } 33318a905236SJesse Barnes 3332142bc7d9SMichel Thierry /* 3333142bc7d9SMichel Thierry * Try engine reset when available. We fall back to full reset if 3334142bc7d9SMichel Thierry * single reset fails. 3335142bc7d9SMichel Thierry */ 33362bfbf6feSChris Wilson if (intel_has_reset_engine(dev_priv) && 33372bfbf6feSChris Wilson !i915_terminally_wedged(&dev_priv->gpu_error)) { 3338142bc7d9SMichel Thierry for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 33399db529aaSDaniel Vetter BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3340142bc7d9SMichel Thierry if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3341142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3342142bc7d9SMichel Thierry continue; 3343142bc7d9SMichel Thierry 3344ce800754SChris Wilson if (i915_reset_engine(engine, msg) == 0) 3345142bc7d9SMichel Thierry engine_mask &= ~intel_engine_flag(engine); 3346142bc7d9SMichel Thierry 3347142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3348142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3349142bc7d9SMichel Thierry wake_up_bit(&dev_priv->gpu_error.flags, 3350142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id); 3351142bc7d9SMichel Thierry } 3352142bc7d9SMichel Thierry } 3353142bc7d9SMichel Thierry 33548af29b0cSChris Wilson if (!engine_mask) 33551604a86dSChris Wilson goto out; 33568af29b0cSChris Wilson 3357142bc7d9SMichel Thierry /* Full reset needs the mutex, stop any other user trying to do so. */ 3358d5367307SChris Wilson if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3359d5367307SChris Wilson wait_event(dev_priv->gpu_error.reset_queue, 3360d5367307SChris Wilson !test_bit(I915_RESET_BACKOFF, 3361d5367307SChris Wilson &dev_priv->gpu_error.flags)); 33621604a86dSChris Wilson goto out; 3363d5367307SChris Wilson } 3364ba1234d1SBen Gamari 3365142bc7d9SMichel Thierry /* Prevent any other reset-engine attempt. 
*/ 3366142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3367142bc7d9SMichel Thierry while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3368142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3369142bc7d9SMichel Thierry wait_on_bit(&dev_priv->gpu_error.flags, 3370142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id, 3371142bc7d9SMichel Thierry TASK_UNINTERRUPTIBLE); 3372142bc7d9SMichel Thierry } 3373142bc7d9SMichel Thierry 3374d0667e9cSChris Wilson i915_reset_device(dev_priv, engine_mask, msg); 3375d5367307SChris Wilson 3376142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3377142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3378142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3379142bc7d9SMichel Thierry } 3380142bc7d9SMichel Thierry 3381d5367307SChris Wilson clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3382d5367307SChris Wilson wake_up_all(&dev_priv->gpu_error.reset_queue); 33831604a86dSChris Wilson 33841604a86dSChris Wilson out: 33851604a86dSChris Wilson intel_runtime_pm_put(dev_priv); 33868a905236SJesse Barnes } 33878a905236SJesse Barnes 338842f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 338942f52ef8SKeith Packard * we use as a pipe index 339042f52ef8SKeith Packard */ 339186e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 33920a3e67a4SJesse Barnes { 3393fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3394e9d21d7fSKeith Packard unsigned long irqflags; 339571e0ffa5SJesse Barnes 33961ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 339786e83e35SChris Wilson i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 339886e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 339986e83e35SChris Wilson 340086e83e35SChris Wilson return 0; 340186e83e35SChris Wilson } 340286e83e35SChris Wilson 340386e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 340486e83e35SChris Wilson { 340586e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 340686e83e35SChris Wilson unsigned long irqflags; 340786e83e35SChris Wilson 340886e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34097c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 3410755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 34111ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 34128692d00eSChris Wilson 34130a3e67a4SJesse Barnes return 0; 34140a3e67a4SJesse Barnes } 34150a3e67a4SJesse Barnes 341688e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3417f796cf8fSJesse Barnes { 3418fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3419f796cf8fSJesse Barnes unsigned long irqflags; 342055b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 342186e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3422f796cf8fSJesse Barnes 3423f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3424fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, bit); 3425b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3426b1f14ad0SJesse Barnes 34272e8bf223SDhinakaran Pandiyan /* Even though there is no DMC, frame counter can get stuck when 34282e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated. 
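 * drm_vblank_restore() below re-reads the hardware frame counter so
 * the software vblank count is brought back in sync when vblank
 * interrupts are re-enabled.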
34292e8bf223SDhinakaran Pandiyan */ 34302e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34312e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34322e8bf223SDhinakaran Pandiyan 3433b1f14ad0SJesse Barnes return 0; 3434b1f14ad0SJesse Barnes } 3435b1f14ad0SJesse Barnes 343688e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3437abd58f01SBen Widawsky { 3438fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3439abd58f01SBen Widawsky unsigned long irqflags; 3440abd58f01SBen Widawsky 3441abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3442013d3752SVille Syrjälä bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3443abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3444013d3752SVille Syrjälä 34452e8bf223SDhinakaran Pandiyan /* Even if there is no DMC, frame counter can get stuck when 34462e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated, so check only for PSR. 34472e8bf223SDhinakaran Pandiyan */ 34482e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34492e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34502e8bf223SDhinakaran Pandiyan 3451abd58f01SBen Widawsky return 0; 3452abd58f01SBen Widawsky } 3453abd58f01SBen Widawsky 345442f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 345542f52ef8SKeith Packard * we use as a pipe index 345642f52ef8SKeith Packard */ 345786e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 345886e83e35SChris Wilson { 345986e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 346086e83e35SChris Wilson unsigned long irqflags; 346186e83e35SChris Wilson 346286e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 346386e83e35SChris Wilson i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 346486e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 346586e83e35SChris Wilson } 346686e83e35SChris Wilson 346786e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 34680a3e67a4SJesse Barnes { 3469fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3470e9d21d7fSKeith Packard unsigned long irqflags; 34710a3e67a4SJesse Barnes 34721ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34737c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 3474755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 34751ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 34760a3e67a4SJesse Barnes } 34770a3e67a4SJesse Barnes 347888e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3479f796cf8fSJesse Barnes { 3480fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3481f796cf8fSJesse Barnes unsigned long irqflags; 348255b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
348386e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3484f796cf8fSJesse Barnes 3485f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3486fbdedaeaSVille Syrjälä ilk_disable_display_irq(dev_priv, bit); 3487b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3488b1f14ad0SJesse Barnes } 3489b1f14ad0SJesse Barnes 349088e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3491abd58f01SBen Widawsky { 3492fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3493abd58f01SBen Widawsky unsigned long irqflags; 3494abd58f01SBen Widawsky 3495abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3496013d3752SVille Syrjälä bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3497abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3498abd58f01SBen Widawsky } 3499abd58f01SBen Widawsky 3500b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv) 350191738a95SPaulo Zanoni { 35026e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 350391738a95SPaulo Zanoni return; 350491738a95SPaulo Zanoni 35053488d4ebSVille Syrjälä GEN3_IRQ_RESET(SDE); 3506105b122eSPaulo Zanoni 35076e266956STvrtko Ursulin if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3508105b122eSPaulo Zanoni I915_WRITE(SERR_INT, 0xffffffff); 3509622364b6SPaulo Zanoni } 3510105b122eSPaulo Zanoni 351191738a95SPaulo Zanoni /* 3512622364b6SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed PCH 3513622364b6SPaulo Zanoni * interrupts. Hence we can't update it after the interrupt handler is enabled - 3514622364b6SPaulo Zanoni * instead we unconditionally enable all PCH interrupt sources here, but then 3515622364b6SPaulo Zanoni * only unmask them as needed with SDEIMR. 3516622364b6SPaulo Zanoni * 3517622364b6SPaulo Zanoni * This function needs to be called before interrupts are enabled. 
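 * The actual unmasking of individual PCH interrupt sources happens
 * later, in ibx_irq_postinstall(), via SDEIMR.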
351891738a95SPaulo Zanoni */ 3519622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev) 3520622364b6SPaulo Zanoni { 3521fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3522622364b6SPaulo Zanoni 35236e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 3524622364b6SPaulo Zanoni return; 3525622364b6SPaulo Zanoni 3526622364b6SPaulo Zanoni WARN_ON(I915_READ(SDEIER) != 0); 352791738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 352891738a95SPaulo Zanoni POSTING_READ(SDEIER); 352991738a95SPaulo Zanoni } 353091738a95SPaulo Zanoni 3531b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3532d18ea1b5SDaniel Vetter { 35333488d4ebSVille Syrjälä GEN3_IRQ_RESET(GT); 3534b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 35353488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN6_PM); 3536d18ea1b5SDaniel Vetter } 3537d18ea1b5SDaniel Vetter 353870591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 353970591a41SVille Syrjälä { 354071b8b41dSVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 354171b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 354271b8b41dSVille Syrjälä else 354371b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 354471b8b41dSVille Syrjälä 3545ad22d106SVille Syrjälä i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 354670591a41SVille Syrjälä I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 354770591a41SVille Syrjälä 354844d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 354970591a41SVille Syrjälä 35503488d4ebSVille Syrjälä GEN3_IRQ_RESET(VLV_); 35518bd099a7SChris Wilson dev_priv->irq_mask = ~0u; 355270591a41SVille Syrjälä } 355370591a41SVille Syrjälä 35548bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 35558bb61306SVille Syrjälä { 35568bb61306SVille Syrjälä u32 pipestat_mask; 35579ab981f2SVille Syrjälä u32 enable_mask; 35588bb61306SVille Syrjälä enum pipe pipe; 35598bb61306SVille Syrjälä 3560842ebf7aSVille Syrjälä pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 35618bb61306SVille Syrjälä 35628bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 35638bb61306SVille Syrjälä for_each_pipe(dev_priv, pipe) 35648bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 35658bb61306SVille Syrjälä 35669ab981f2SVille Syrjälä enable_mask = I915_DISPLAY_PORT_INTERRUPT | 35678bb61306SVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3568ebf5f921SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3569ebf5f921SVille Syrjälä I915_LPE_PIPE_A_INTERRUPT | 3570ebf5f921SVille Syrjälä I915_LPE_PIPE_B_INTERRUPT; 3571ebf5f921SVille Syrjälä 35728bb61306SVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 3573ebf5f921SVille Syrjälä enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3574ebf5f921SVille Syrjälä I915_LPE_PIPE_C_INTERRUPT; 35756b7eafc1SVille Syrjälä 35768bd099a7SChris Wilson WARN_ON(dev_priv->irq_mask != ~0u); 35776b7eafc1SVille Syrjälä 35789ab981f2SVille Syrjälä dev_priv->irq_mask = ~enable_mask; 35798bb61306SVille Syrjälä 35803488d4ebSVille Syrjälä GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 35818bb61306SVille Syrjälä } 35828bb61306SVille Syrjälä 35838bb61306SVille Syrjälä /* drm_dma.h hooks 35848bb61306SVille Syrjälä */ 35858bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev) 35868bb61306SVille Syrjälä { 3587fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 35888bb61306SVille 
Syrjälä 3589*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 5)) 35908bb61306SVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 35918bb61306SVille Syrjälä 35923488d4ebSVille Syrjälä GEN3_IRQ_RESET(DE); 3593*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 7)) 35948bb61306SVille Syrjälä I915_WRITE(GEN7_ERR_INT, 0xffffffff); 35958bb61306SVille Syrjälä 3596fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 3597fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3598fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3599fc340442SDaniel Vetter } 3600fc340442SDaniel Vetter 3601b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 36028bb61306SVille Syrjälä 3603b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 36048bb61306SVille Syrjälä } 36058bb61306SVille Syrjälä 36066bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev) 36077e231dbeSJesse Barnes { 3608fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 36097e231dbeSJesse Barnes 361034c7b8a7SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 361134c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 361234c7b8a7SVille Syrjälä 3613b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 36147e231dbeSJesse Barnes 3615ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 36169918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 361770591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3618ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 36197e231dbeSJesse Barnes } 36207e231dbeSJesse Barnes 3621d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3622d6e3cca3SDaniel Vetter { 3623d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 0); 3624d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 1); 3625d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 2); 3626d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 3); 3627d6e3cca3SDaniel Vetter } 3628d6e3cca3SDaniel Vetter 3629823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev) 3630abd58f01SBen Widawsky { 3631fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3632abd58f01SBen Widawsky int pipe; 3633abd58f01SBen Widawsky 36344376b9c9SMika Kuoppala gen8_master_intr_disable(dev_priv->regs); 3635abd58f01SBen Widawsky 3636d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 3637abd58f01SBen Widawsky 3638e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3639e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3640e04f7eceSVille Syrjälä 3641055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 3642f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 3643813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 3644f86f3fb0SPaulo Zanoni GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3645abd58f01SBen Widawsky 36463488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_PORT_); 36473488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_MISC_); 36483488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 3649abd58f01SBen Widawsky 36506e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 3651b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 3652abd58f01SBen Widawsky } 3653abd58f01SBen Widawsky 365451951ae7SMika Kuoppala static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 365551951ae7SMika Kuoppala { 365651951ae7SMika Kuoppala /* Disable RCS, BCS, VCS and VECS class engines. 
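 * Each enable register below packs two engine classes, one per
 * 16-bit half, so writing 0 disables both at once.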
*/ 365751951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 365851951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 365951951ae7SMika Kuoppala 366051951ae7SMika Kuoppala /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 366151951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 366251951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 366351951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 366451951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 366551951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3666d02b98b8SOscar Mateo 3667d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3668d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 366951951ae7SMika Kuoppala } 367051951ae7SMika Kuoppala 367151951ae7SMika Kuoppala static void gen11_irq_reset(struct drm_device *dev) 367251951ae7SMika Kuoppala { 367351951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 367451951ae7SMika Kuoppala int pipe; 367551951ae7SMika Kuoppala 367681067b71SMika Kuoppala gen11_master_intr_disable(dev_priv->regs); 367751951ae7SMika Kuoppala 367851951ae7SMika Kuoppala gen11_gt_irq_reset(dev_priv); 367951951ae7SMika Kuoppala 368051951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 368151951ae7SMika Kuoppala 368262819dfdSJosé Roberto de Souza I915_WRITE(EDP_PSR_IMR, 0xffffffff); 368362819dfdSJosé Roberto de Souza I915_WRITE(EDP_PSR_IIR, 0xffffffff); 368462819dfdSJosé Roberto de Souza 368551951ae7SMika Kuoppala for_each_pipe(dev_priv, pipe) 368651951ae7SMika Kuoppala if (intel_display_power_is_enabled(dev_priv, 368751951ae7SMika Kuoppala POWER_DOMAIN_PIPE(pipe))) 368851951ae7SMika Kuoppala GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 368951951ae7SMika Kuoppala 369051951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_PORT_); 369151951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_MISC_); 3692121e758eSDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_DE_HPD_); 3693df0d28c1SDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_GU_MISC_); 369451951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_PCU_); 369531604222SAnusha Srivatsa 369631604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 369731604222SAnusha Srivatsa GEN3_IRQ_RESET(SDE); 369851951ae7SMika Kuoppala } 369951951ae7SMika Kuoppala 37004c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3701001bd2cbSImre Deak u8 pipe_mask) 3702d49bdb0eSPaulo Zanoni { 37031180e206SPaulo Zanoni uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 37046831f3e3SVille Syrjälä enum pipe pipe; 3705d49bdb0eSPaulo Zanoni 370613321786SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 37079dfe2e3aSImre Deak 37089dfe2e3aSImre Deak if (!intel_irqs_enabled(dev_priv)) { 37099dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 37109dfe2e3aSImre Deak return; 37119dfe2e3aSImre Deak } 37129dfe2e3aSImre Deak 37136831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 37146831f3e3SVille Syrjälä GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 37156831f3e3SVille Syrjälä dev_priv->de_irq_mask[pipe], 37166831f3e3SVille Syrjälä ~dev_priv->de_irq_mask[pipe] | extra_ier); 37179dfe2e3aSImre Deak 371813321786SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 3719d49bdb0eSPaulo Zanoni } 3720d49bdb0eSPaulo Zanoni 3721aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3722001bd2cbSImre Deak u8 pipe_mask) 3723aae8ba84SVille Syrjälä { 37246831f3e3SVille Syrjälä enum pipe pipe; 37256831f3e3SVille Syrjälä 
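	/*
	 * Mirror image of gen8_irq_power_well_post_enable(): mask and reset
	 * the per-pipe interrupts before the power well goes down, then make
	 * sure no display irq handler is still running.
	 */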
3726aae8ba84SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37279dfe2e3aSImre Deak 37289dfe2e3aSImre Deak if (!intel_irqs_enabled(dev_priv)) { 37299dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 37309dfe2e3aSImre Deak return; 37319dfe2e3aSImre Deak } 37329dfe2e3aSImre Deak 37336831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 37346831f3e3SVille Syrjälä GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 37359dfe2e3aSImre Deak 3736aae8ba84SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 3737aae8ba84SVille Syrjälä 3738aae8ba84SVille Syrjälä /* make sure we're done processing display irqs */ 373991c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 3740aae8ba84SVille Syrjälä } 3741aae8ba84SVille Syrjälä 37426bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev) 374343f328d7SVille Syrjälä { 3744fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 374543f328d7SVille Syrjälä 374643f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 374743f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 374843f328d7SVille Syrjälä 3749d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 375043f328d7SVille Syrjälä 37513488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 375243f328d7SVille Syrjälä 3753ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37549918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 375570591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3756ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 375743f328d7SVille Syrjälä } 375843f328d7SVille Syrjälä 375991d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 376087a02106SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 376187a02106SVille Syrjälä { 376287a02106SVille Syrjälä struct intel_encoder *encoder; 376387a02106SVille Syrjälä u32 enabled_irqs = 0; 376487a02106SVille Syrjälä 376591c8a326SChris Wilson for_each_intel_encoder(&dev_priv->drm, encoder) 376687a02106SVille Syrjälä if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 376787a02106SVille Syrjälä enabled_irqs |= hpd[encoder->hpd_pin]; 376887a02106SVille Syrjälä 376987a02106SVille Syrjälä return enabled_irqs; 377087a02106SVille Syrjälä } 377187a02106SVille Syrjälä 37721a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 37731a56b1a2SImre Deak { 37741a56b1a2SImre Deak u32 hotplug; 37751a56b1a2SImre Deak 37761a56b1a2SImre Deak /* 37771a56b1a2SImre Deak * Enable digital hotplug on the PCH, and configure the DP short pulse 37781a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec). 37791a56b1a2SImre Deak * The pulse duration bits are reserved on LPT+. 37801a56b1a2SImre Deak */ 37811a56b1a2SImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 37821a56b1a2SImre Deak hotplug &= ~(PORTB_PULSE_DURATION_MASK | 37831a56b1a2SImre Deak PORTC_PULSE_DURATION_MASK | 37841a56b1a2SImre Deak PORTD_PULSE_DURATION_MASK); 37851a56b1a2SImre Deak hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 37861a56b1a2SImre Deak hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 37871a56b1a2SImre Deak hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 37881a56b1a2SImre Deak /* 37891a56b1a2SImre Deak * When CPU and PCH are on the same package, port A 37901a56b1a2SImre Deak * HPD must be enabled in both north and south. 
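 * Hence the extra PORTA_HOTPLUG_ENABLE below for LPT-LP (the
 * on-package PCH), on top of the north (CPU) enable programmed in
 * ilk_hpd_detection_setup().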
37911a56b1a2SImre Deak */ 37921a56b1a2SImre Deak if (HAS_PCH_LPT_LP(dev_priv)) 37931a56b1a2SImre Deak hotplug |= PORTA_HOTPLUG_ENABLE; 37941a56b1a2SImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 37951a56b1a2SImre Deak } 37961a56b1a2SImre Deak 379791d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 379882a28bcfSDaniel Vetter { 37991a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 380082a28bcfSDaniel Vetter 380191d14251STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) { 3802fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 380391d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 380482a28bcfSDaniel Vetter } else { 3805fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 380691d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 380782a28bcfSDaniel Vetter } 380882a28bcfSDaniel Vetter 3809fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 381082a28bcfSDaniel Vetter 38111a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 38126dbf30ceSVille Syrjälä } 381326951cafSXiong Zhang 381431604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 381531604222SAnusha Srivatsa { 381631604222SAnusha Srivatsa u32 hotplug; 381731604222SAnusha Srivatsa 381831604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_DDI); 381931604222SAnusha Srivatsa hotplug |= ICP_DDIA_HPD_ENABLE | 382031604222SAnusha Srivatsa ICP_DDIB_HPD_ENABLE; 382131604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 382231604222SAnusha Srivatsa 382331604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_TC); 382431604222SAnusha Srivatsa hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 382531604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC2) | 382631604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC3) | 382731604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC4); 382831604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 382931604222SAnusha Srivatsa } 383031604222SAnusha Srivatsa 383131604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 383231604222SAnusha Srivatsa { 383331604222SAnusha Srivatsa u32 hotplug_irqs, enabled_irqs; 383431604222SAnusha Srivatsa 383531604222SAnusha Srivatsa hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 383631604222SAnusha Srivatsa enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 383731604222SAnusha Srivatsa 383831604222SAnusha Srivatsa ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 383931604222SAnusha Srivatsa 384031604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 384131604222SAnusha Srivatsa } 384231604222SAnusha Srivatsa 3843121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3844121e758eSDhinakaran Pandiyan { 3845121e758eSDhinakaran Pandiyan u32 hotplug; 3846121e758eSDhinakaran Pandiyan 3847121e758eSDhinakaran Pandiyan hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3848121e758eSDhinakaran Pandiyan hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3849121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3850121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3851121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3852121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3853b796b971SDhinakaran Pandiyan 3854b796b971SDhinakaran Pandiyan hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3855b796b971SDhinakaran Pandiyan hotplug |= 
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3856b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3857b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3858b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3859b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3860121e758eSDhinakaran Pandiyan } 3861121e758eSDhinakaran Pandiyan 3862121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3863121e758eSDhinakaran Pandiyan { 3864121e758eSDhinakaran Pandiyan u32 hotplug_irqs, enabled_irqs; 3865121e758eSDhinakaran Pandiyan u32 val; 3866121e758eSDhinakaran Pandiyan 3867b796b971SDhinakaran Pandiyan enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3868b796b971SDhinakaran Pandiyan hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3869121e758eSDhinakaran Pandiyan 3870121e758eSDhinakaran Pandiyan val = I915_READ(GEN11_DE_HPD_IMR); 3871121e758eSDhinakaran Pandiyan val &= ~hotplug_irqs; 3872121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IMR, val); 3873121e758eSDhinakaran Pandiyan POSTING_READ(GEN11_DE_HPD_IMR); 3874121e758eSDhinakaran Pandiyan 3875121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 387631604222SAnusha Srivatsa 387731604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 387831604222SAnusha Srivatsa icp_hpd_irq_setup(dev_priv); 3879121e758eSDhinakaran Pandiyan } 3880121e758eSDhinakaran Pandiyan 38812a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 38822a57d9ccSImre Deak { 38833b92e263SRodrigo Vivi u32 val, hotplug; 38843b92e263SRodrigo Vivi 38853b92e263SRodrigo Vivi /* Display WA #1179 WaHardHangonHotPlug: cnp */ 38863b92e263SRodrigo Vivi if (HAS_PCH_CNP(dev_priv)) { 38873b92e263SRodrigo Vivi val = I915_READ(SOUTH_CHICKEN1); 38883b92e263SRodrigo Vivi val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 38893b92e263SRodrigo Vivi val |= CHASSIS_CLK_REQ_DURATION(0xf); 38903b92e263SRodrigo Vivi I915_WRITE(SOUTH_CHICKEN1, val); 38913b92e263SRodrigo Vivi } 38922a57d9ccSImre Deak 38932a57d9ccSImre Deak /* Enable digital hotplug on the PCH */ 38942a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 38952a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 38962a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 38972a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE | 38982a57d9ccSImre Deak PORTD_HOTPLUG_ENABLE; 38992a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 39002a57d9ccSImre Deak 39012a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG2); 39022a57d9ccSImre Deak hotplug |= PORTE_HOTPLUG_ENABLE; 39032a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 39042a57d9ccSImre Deak } 39052a57d9ccSImre Deak 390691d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 39076dbf30ceSVille Syrjälä { 39082a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 39096dbf30ceSVille Syrjälä 39106dbf30ceSVille Syrjälä hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 391191d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 39126dbf30ceSVille Syrjälä 39136dbf30ceSVille Syrjälä ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 39146dbf30ceSVille Syrjälä 39152a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 391626951cafSXiong Zhang } 39177fe0b973SKeith Packard 39181a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 39191a56b1a2SImre Deak { 39201a56b1a2SImre Deak u32 hotplug; 39211a56b1a2SImre Deak 39221a56b1a2SImre Deak /* 39231a56b1a2SImre 
Deak * Enable digital hotplug on the CPU, and configure the DP short pulse 39241a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec) 39251a56b1a2SImre Deak * The pulse duration bits are reserved on HSW+. 39261a56b1a2SImre Deak */ 39271a56b1a2SImre Deak hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 39281a56b1a2SImre Deak hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 39291a56b1a2SImre Deak hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 39301a56b1a2SImre Deak DIGITAL_PORTA_PULSE_DURATION_2ms; 39311a56b1a2SImre Deak I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 39321a56b1a2SImre Deak } 39331a56b1a2SImre Deak 393491d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3935e4ce95aaSVille Syrjälä { 39361a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 3937e4ce95aaSVille Syrjälä 393891d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 8) { 39393a3b3c7dSVille Syrjälä hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 394091d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 39413a3b3c7dSVille Syrjälä 39423a3b3c7dSVille Syrjälä bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 394391d14251STvrtko Ursulin } else if (INTEL_GEN(dev_priv) >= 7) { 394423bb4cb5SVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 394591d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 39463a3b3c7dSVille Syrjälä 39473a3b3c7dSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 394823bb4cb5SVille Syrjälä } else { 3949e4ce95aaSVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG; 395091d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3951e4ce95aaSVille Syrjälä 3952e4ce95aaSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 39533a3b3c7dSVille Syrjälä } 3954e4ce95aaSVille Syrjälä 39551a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 3956e4ce95aaSVille Syrjälä 395791d14251STvrtko Ursulin ibx_hpd_irq_setup(dev_priv); 3958e4ce95aaSVille Syrjälä } 3959e4ce95aaSVille Syrjälä 39602a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 39612a57d9ccSImre Deak u32 enabled_irqs) 3962e0a20ad7SShashank Sharma { 39632a57d9ccSImre Deak u32 hotplug; 3964e0a20ad7SShashank Sharma 3965a52bb15bSVille Syrjälä hotplug = I915_READ(PCH_PORT_HOTPLUG); 39662a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 39672a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 39682a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE; 3969d252bf68SShubhangi Shrivastava 3970d252bf68SShubhangi Shrivastava DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3971d252bf68SShubhangi Shrivastava hotplug, enabled_irqs); 3972d252bf68SShubhangi Shrivastava hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3973d252bf68SShubhangi Shrivastava 3974d252bf68SShubhangi Shrivastava /* 3975d252bf68SShubhangi Shrivastava * For BXT invert bit has to be set based on AOB design 3976d252bf68SShubhangi Shrivastava * for HPD detection logic, update it based on VBT fields. 
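 * There is one invert bit per DDI (A/B/C), and each is set only when
 * the VBT marks the corresponding port as HPD inverted.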
3977d252bf68SShubhangi Shrivastava */ 3978d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3979d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3980d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIA_HPD_INVERT; 3981d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3982d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3983d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIB_HPD_INVERT; 3984d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3985d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3986d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIC_HPD_INVERT; 3987d252bf68SShubhangi Shrivastava 3988a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3989e0a20ad7SShashank Sharma } 3990e0a20ad7SShashank Sharma 39912a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 39922a57d9ccSImre Deak { 39932a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 39942a57d9ccSImre Deak } 39952a57d9ccSImre Deak 39962a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 39972a57d9ccSImre Deak { 39982a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 39992a57d9ccSImre Deak 40002a57d9ccSImre Deak enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 40012a57d9ccSImre Deak hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 40022a57d9ccSImre Deak 40032a57d9ccSImre Deak bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 40042a57d9ccSImre Deak 40052a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 40062a57d9ccSImre Deak } 40072a57d9ccSImre Deak 4008d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 4009d46da437SPaulo Zanoni { 4010fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 401182a28bcfSDaniel Vetter u32 mask; 4012d46da437SPaulo Zanoni 40136e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 4014692a04cfSDaniel Vetter return; 4015692a04cfSDaniel Vetter 40166e266956STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) 40175c673b60SDaniel Vetter mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 40184ebc6509SDhinakaran Pandiyan else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 40195c673b60SDaniel Vetter mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 40204ebc6509SDhinakaran Pandiyan else 40214ebc6509SDhinakaran Pandiyan mask = SDE_GMBUS_CPT; 40228664281bSPaulo Zanoni 40233488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, SDEIIR); 4024d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 40252a57d9ccSImre Deak 40262a57d9ccSImre Deak if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 40272a57d9ccSImre Deak HAS_PCH_LPT(dev_priv)) 40281a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 40292a57d9ccSImre Deak else 40302a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 4031d46da437SPaulo Zanoni } 4032d46da437SPaulo Zanoni 40330a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 40340a9a8c91SDaniel Vetter { 4035fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40360a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 40370a9a8c91SDaniel Vetter 40380a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 40390a9a8c91SDaniel Vetter 40400a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 40413c9192bcSTvrtko Ursulin if (HAS_L3_DPF(dev_priv)) { 40420a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. 
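 * i.e. the GT_PARITY_ERROR() bits are kept out of gt_irq_mask and are
 * always enabled in GTIER.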
*/ 4043772c2a51STvrtko Ursulin dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4044772c2a51STvrtko Ursulin gt_irqs |= GT_PARITY_ERROR(dev_priv); 40450a9a8c91SDaniel Vetter } 40460a9a8c91SDaniel Vetter 40470a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 4048*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 5)) { 4049f8973c21SChris Wilson gt_irqs |= ILK_BSD_USER_INTERRUPT; 40500a9a8c91SDaniel Vetter } else { 40510a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 40520a9a8c91SDaniel Vetter } 40530a9a8c91SDaniel Vetter 40543488d4ebSVille Syrjälä GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 40550a9a8c91SDaniel Vetter 4056b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 405778e68d36SImre Deak /* 405878e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS 405978e68d36SImre Deak * itself is enabled/disabled. 406078e68d36SImre Deak */ 4061f4e9af4fSAkash Goel if (HAS_VEBOX(dev_priv)) { 40620a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4063f4e9af4fSAkash Goel dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4064f4e9af4fSAkash Goel } 40650a9a8c91SDaniel Vetter 4066f4e9af4fSAkash Goel dev_priv->pm_imr = 0xffffffff; 40673488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 40680a9a8c91SDaniel Vetter } 40690a9a8c91SDaniel Vetter } 40700a9a8c91SDaniel Vetter 4071f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 4072036a4a7dSZhenyu Wang { 4073fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40748e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 40758e76f8dcSPaulo Zanoni 4076b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) { 40778e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4078842ebf7aSVille Syrjälä DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 40798e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 408023bb4cb5SVille Syrjälä DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 408123bb4cb5SVille Syrjälä DE_DP_A_HOTPLUG_IVB); 40828e76f8dcSPaulo Zanoni } else { 40838e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4084842ebf7aSVille Syrjälä DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4085842ebf7aSVille Syrjälä DE_PIPEA_CRC_DONE | DE_POISON); 4086e4ce95aaSVille Syrjälä extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4087e4ce95aaSVille Syrjälä DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4088e4ce95aaSVille Syrjälä DE_DP_A_HOTPLUG); 40898e76f8dcSPaulo Zanoni } 4090036a4a7dSZhenyu Wang 4091fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 4092fc340442SDaniel Vetter gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 40931aeb1b5fSDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4094fc340442SDaniel Vetter display_mask |= DE_EDP_PSR_INT_HSW; 4095fc340442SDaniel Vetter } 4096fc340442SDaniel Vetter 40971ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 4098036a4a7dSZhenyu Wang 4099622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4100622364b6SPaulo Zanoni 41013488d4ebSVille Syrjälä GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4102036a4a7dSZhenyu Wang 41030a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 4104036a4a7dSZhenyu Wang 41051a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 41061a56b1a2SImre Deak 4107d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 41087fe0b973SKeith Packard 410950a0bc90STvrtko Ursulin if (IS_IRONLAKE_M(dev_priv)) { 41106005ce42SDaniel Vetter /* Enable PCU event interrupts 
41116005ce42SDaniel Vetter * 41126005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 41134bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 41144bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 4115d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4116fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4117d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4118f97108d1SJesse Barnes } 4119f97108d1SJesse Barnes 4120036a4a7dSZhenyu Wang return 0; 4121036a4a7dSZhenyu Wang } 4122036a4a7dSZhenyu Wang 4123f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4124f8b79e58SImre Deak { 412567520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4126f8b79e58SImre Deak 4127f8b79e58SImre Deak if (dev_priv->display_irqs_enabled) 4128f8b79e58SImre Deak return; 4129f8b79e58SImre Deak 4130f8b79e58SImre Deak dev_priv->display_irqs_enabled = true; 4131f8b79e58SImre Deak 4132d6c69803SVille Syrjälä if (intel_irqs_enabled(dev_priv)) { 4133d6c69803SVille Syrjälä vlv_display_irq_reset(dev_priv); 4134ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4135f8b79e58SImre Deak } 4136d6c69803SVille Syrjälä } 4137f8b79e58SImre Deak 4138f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4139f8b79e58SImre Deak { 414067520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4141f8b79e58SImre Deak 4142f8b79e58SImre Deak if (!dev_priv->display_irqs_enabled) 4143f8b79e58SImre Deak return; 4144f8b79e58SImre Deak 4145f8b79e58SImre Deak dev_priv->display_irqs_enabled = false; 4146f8b79e58SImre Deak 4147950eabafSImre Deak if (intel_irqs_enabled(dev_priv)) 4148ad22d106SVille Syrjälä vlv_display_irq_reset(dev_priv); 4149f8b79e58SImre Deak } 4150f8b79e58SImre Deak 41510e6c9a9eSVille Syrjälä 41520e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev) 41530e6c9a9eSVille Syrjälä { 4154fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 41550e6c9a9eSVille Syrjälä 41560a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 41577e231dbeSJesse Barnes 4158ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 41599918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4160ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4161ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4162ad22d106SVille Syrjälä 41637e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 416434c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 416520afbda2SDaniel Vetter 416620afbda2SDaniel Vetter return 0; 416720afbda2SDaniel Vetter } 416820afbda2SDaniel Vetter 4169abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4170abd58f01SBen Widawsky { 4171abd58f01SBen Widawsky /* These are interrupts we'll toggle with the ring mask register */ 4172abd58f01SBen Widawsky uint32_t gt_interrupts[] = { 4173abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 417473d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 417573d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 417673d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 4177abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 417873d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 417973d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << 
GEN8_VCS2_IRQ_SHIFT | 418073d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 4181abd58f01SBen Widawsky 0, 418273d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 418373d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 4184abd58f01SBen Widawsky }; 4185abd58f01SBen Widawsky 418698735739STvrtko Ursulin if (HAS_L3_DPF(dev_priv)) 418798735739STvrtko Ursulin gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 418898735739STvrtko Ursulin 4189f4e9af4fSAkash Goel dev_priv->pm_ier = 0x0; 4190f4e9af4fSAkash Goel dev_priv->pm_imr = ~dev_priv->pm_ier; 41919a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 41929a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 419378e68d36SImre Deak /* 419478e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS itself 419526705e20SSagar Arun Kamble * is enabled/disabled. Same wil be the case for GuC interrupts. 419678e68d36SImre Deak */ 4197f4e9af4fSAkash Goel GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 41989a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4199abd58f01SBen Widawsky } 4200abd58f01SBen Widawsky 4201abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4202abd58f01SBen Widawsky { 4203770de83dSDamien Lespiau uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4204770de83dSDamien Lespiau uint32_t de_pipe_enables; 42053a3b3c7dSVille Syrjälä u32 de_port_masked = GEN8_AUX_CHANNEL_A; 42063a3b3c7dSVille Syrjälä u32 de_port_enables; 4207df0d28c1SDhinakaran Pandiyan u32 de_misc_masked = GEN8_DE_EDP_PSR; 42083a3b3c7dSVille Syrjälä enum pipe pipe; 4209770de83dSDamien Lespiau 4210df0d28c1SDhinakaran Pandiyan if (INTEL_GEN(dev_priv) <= 10) 4211df0d28c1SDhinakaran Pandiyan de_misc_masked |= GEN8_DE_MISC_GSE; 4212df0d28c1SDhinakaran Pandiyan 4213bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) { 4214842ebf7aSVille Syrjälä de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 42153a3b3c7dSVille Syrjälä de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 421688e04703SJesse Barnes GEN9_AUX_CHANNEL_D; 4217cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 42183a3b3c7dSVille Syrjälä de_port_masked |= BXT_DE_PORT_GMBUS; 42193a3b3c7dSVille Syrjälä } else { 4220842ebf7aSVille Syrjälä de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 42213a3b3c7dSVille Syrjälä } 4222770de83dSDamien Lespiau 4223bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 4224bb187e93SJames Ausmus de_port_masked |= ICL_AUX_CHANNEL_E; 4225bb187e93SJames Ausmus 42269bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4227a324fcacSRodrigo Vivi de_port_masked |= CNL_AUX_CHANNEL_F; 4228a324fcacSRodrigo Vivi 4229770de83dSDamien Lespiau de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4230770de83dSDamien Lespiau GEN8_PIPE_FIFO_UNDERRUN; 4231770de83dSDamien Lespiau 42323a3b3c7dSVille Syrjälä de_port_enables = de_port_masked; 4233cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4234a52bb15bSVille Syrjälä de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4235a52bb15bSVille Syrjälä else if (IS_BROADWELL(dev_priv)) 42363a3b3c7dSVille Syrjälä de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 42373a3b3c7dSVille Syrjälä 4238e04f7eceSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 423954fd3149SDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4240e04f7eceSVille Syrjälä 
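	/*
	 * Unmask the pipe interrupts only for pipes whose power well is
	 * currently enabled; powered-down pipes are programmed later from
	 * gen8_irq_power_well_post_enable().
	 */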
42410a195c02SMika Kahola for_each_pipe(dev_priv, pipe) { 42420a195c02SMika Kahola dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4243abd58f01SBen Widawsky 4244f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 4245813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 4246813bde43SPaulo Zanoni GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 4247813bde43SPaulo Zanoni dev_priv->de_irq_mask[pipe], 424835079899SPaulo Zanoni de_pipe_enables); 42490a195c02SMika Kahola } 4250abd58f01SBen Widawsky 42513488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 42523488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 42532a57d9ccSImre Deak 4254121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11) { 4255121e758eSDhinakaran Pandiyan u32 de_hpd_masked = 0; 4256b796b971SDhinakaran Pandiyan u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4257b796b971SDhinakaran Pandiyan GEN11_DE_TBT_HOTPLUG_MASK; 4258121e758eSDhinakaran Pandiyan 4259121e758eSDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); 4260121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 4261121e758eSDhinakaran Pandiyan } else if (IS_GEN9_LP(dev_priv)) { 42622a57d9ccSImre Deak bxt_hpd_detection_setup(dev_priv); 4263121e758eSDhinakaran Pandiyan } else if (IS_BROADWELL(dev_priv)) { 42641a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 4265abd58f01SBen Widawsky } 4266121e758eSDhinakaran Pandiyan } 4267abd58f01SBen Widawsky 4268abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev) 4269abd58f01SBen Widawsky { 4270fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4271abd58f01SBen Widawsky 42726e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4273622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4274622364b6SPaulo Zanoni 4275abd58f01SBen Widawsky gen8_gt_irq_postinstall(dev_priv); 4276abd58f01SBen Widawsky gen8_de_irq_postinstall(dev_priv); 4277abd58f01SBen Widawsky 42786e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4279abd58f01SBen Widawsky ibx_irq_postinstall(dev); 4280abd58f01SBen Widawsky 42814376b9c9SMika Kuoppala gen8_master_intr_enable(dev_priv->regs); 4282abd58f01SBen Widawsky 4283abd58f01SBen Widawsky return 0; 4284abd58f01SBen Widawsky } 4285abd58f01SBen Widawsky 428651951ae7SMika Kuoppala static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 428751951ae7SMika Kuoppala { 428851951ae7SMika Kuoppala const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 428951951ae7SMika Kuoppala 429051951ae7SMika Kuoppala BUILD_BUG_ON(irqs & 0xffff0000); 429151951ae7SMika Kuoppala 429251951ae7SMika Kuoppala /* Enable RCS, BCS, VCS and VECS class interrupts. */ 429351951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 429451951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 429551951ae7SMika Kuoppala 429651951ae7SMika Kuoppala /* Unmask irqs on RCS, BCS, VCS and VECS engines. 
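 * As with the enable registers above, each mask register packs two
 * 16-bit halves; the reserved (RSVD) halves are left fully masked.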
*/ 429751951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 429851951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 429951951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 430051951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 430151951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 430251951ae7SMika Kuoppala 4303d02b98b8SOscar Mateo /* 4304d02b98b8SOscar Mateo * RPS interrupts will get enabled/disabled on demand when RPS itself 4305d02b98b8SOscar Mateo * is enabled/disabled. 4306d02b98b8SOscar Mateo */ 4307d02b98b8SOscar Mateo dev_priv->pm_ier = 0x0; 4308d02b98b8SOscar Mateo dev_priv->pm_imr = ~dev_priv->pm_ier; 4309d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4310d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 431151951ae7SMika Kuoppala } 431251951ae7SMika Kuoppala 431331604222SAnusha Srivatsa static void icp_irq_postinstall(struct drm_device *dev) 431431604222SAnusha Srivatsa { 431531604222SAnusha Srivatsa struct drm_i915_private *dev_priv = to_i915(dev); 431631604222SAnusha Srivatsa u32 mask = SDE_GMBUS_ICP; 431731604222SAnusha Srivatsa 431831604222SAnusha Srivatsa WARN_ON(I915_READ(SDEIER) != 0); 431931604222SAnusha Srivatsa I915_WRITE(SDEIER, 0xffffffff); 432031604222SAnusha Srivatsa POSTING_READ(SDEIER); 432131604222SAnusha Srivatsa 432231604222SAnusha Srivatsa gen3_assert_iir_is_zero(dev_priv, SDEIIR); 432331604222SAnusha Srivatsa I915_WRITE(SDEIMR, ~mask); 432431604222SAnusha Srivatsa 432531604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 432631604222SAnusha Srivatsa } 432731604222SAnusha Srivatsa 432851951ae7SMika Kuoppala static int gen11_irq_postinstall(struct drm_device *dev) 432951951ae7SMika Kuoppala { 433051951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 4331df0d28c1SDhinakaran Pandiyan u32 gu_misc_masked = GEN11_GU_MISC_GSE; 433251951ae7SMika Kuoppala 433331604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 433431604222SAnusha Srivatsa icp_irq_postinstall(dev); 433531604222SAnusha Srivatsa 433651951ae7SMika Kuoppala gen11_gt_irq_postinstall(dev_priv); 433751951ae7SMika Kuoppala gen8_de_irq_postinstall(dev_priv); 433851951ae7SMika Kuoppala 4339df0d28c1SDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4340df0d28c1SDhinakaran Pandiyan 434151951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 434251951ae7SMika Kuoppala 434381067b71SMika Kuoppala gen11_master_intr_enable(dev_priv->regs); 434451951ae7SMika Kuoppala 434551951ae7SMika Kuoppala return 0; 434651951ae7SMika Kuoppala } 434751951ae7SMika Kuoppala 434843f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev) 434943f328d7SVille Syrjälä { 4350fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 435143f328d7SVille Syrjälä 435243f328d7SVille Syrjälä gen8_gt_irq_postinstall(dev_priv); 435343f328d7SVille Syrjälä 4354ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 43559918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4356ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4357ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4358ad22d106SVille Syrjälä 4359e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 436043f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 436143f328d7SVille Syrjälä 436243f328d7SVille Syrjälä return 0; 
436343f328d7SVille Syrjälä } 436443f328d7SVille Syrjälä 43656bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev) 4366c2798b19SChris Wilson { 4367fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4368c2798b19SChris Wilson 436944d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 437044d9241eSVille Syrjälä 4371d420a50cSVille Syrjälä I915_WRITE16(HWSTAM, 0xffff); 4372d420a50cSVille Syrjälä 4373e9e9848aSVille Syrjälä GEN2_IRQ_RESET(); 4374c2798b19SChris Wilson } 4375c2798b19SChris Wilson 4376c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 4377c2798b19SChris Wilson { 4378fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4379e9e9848aSVille Syrjälä u16 enable_mask; 4380c2798b19SChris Wilson 4381045cebd2SVille Syrjälä I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4382045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 4383c2798b19SChris Wilson 4384c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 4385c2798b19SChris Wilson dev_priv->irq_mask = 4386c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 438716659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 438816659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4389c2798b19SChris Wilson 4390e9e9848aSVille Syrjälä enable_mask = 4391c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4392c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 439316659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4394e9e9848aSVille Syrjälä I915_USER_INTERRUPT; 4395e9e9848aSVille Syrjälä 4396e9e9848aSVille Syrjälä GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4397c2798b19SChris Wilson 4398379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4399379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 4400d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4401755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4402755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4403d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4404379ef82dSDaniel Vetter 4405c2798b19SChris Wilson return 0; 4406c2798b19SChris Wilson } 4407c2798b19SChris Wilson 440878c357ddSVille Syrjälä static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 440978c357ddSVille Syrjälä u16 *eir, u16 *eir_stuck) 441078c357ddSVille Syrjälä { 441178c357ddSVille Syrjälä u16 emr; 441278c357ddSVille Syrjälä 441378c357ddSVille Syrjälä *eir = I915_READ16(EIR); 441478c357ddSVille Syrjälä 441578c357ddSVille Syrjälä if (*eir) 441678c357ddSVille Syrjälä I915_WRITE16(EIR, *eir); 441778c357ddSVille Syrjälä 441878c357ddSVille Syrjälä *eir_stuck = I915_READ16(EIR); 441978c357ddSVille Syrjälä if (*eir_stuck == 0) 442078c357ddSVille Syrjälä return; 442178c357ddSVille Syrjälä 442278c357ddSVille Syrjälä /* 442378c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 442478c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 442578c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 442678c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 442778c357ddSVille Syrjälä * is still pending. Also some EIR bits can't be 442878c357ddSVille Syrjälä * cleared except by handling the underlying error 442978c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 443078c357ddSVille Syrjälä * remains set. 
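 * The saved EMR value is restored below with the stuck EIR bits OR'ed
 * in, so those errors can no longer retrigger the master error bit.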
443178c357ddSVille Syrjälä */ 443278c357ddSVille Syrjälä emr = I915_READ16(EMR); 443378c357ddSVille Syrjälä I915_WRITE16(EMR, 0xffff); 443478c357ddSVille Syrjälä I915_WRITE16(EMR, emr | *eir_stuck); 443578c357ddSVille Syrjälä } 443678c357ddSVille Syrjälä 443778c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 443878c357ddSVille Syrjälä u16 eir, u16 eir_stuck) 443978c357ddSVille Syrjälä { 444078c357ddSVille Syrjälä DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 444178c357ddSVille Syrjälä 444278c357ddSVille Syrjälä if (eir_stuck) 444378c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 444478c357ddSVille Syrjälä } 444578c357ddSVille Syrjälä 444678c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 444778c357ddSVille Syrjälä u32 *eir, u32 *eir_stuck) 444878c357ddSVille Syrjälä { 444978c357ddSVille Syrjälä u32 emr; 445078c357ddSVille Syrjälä 445178c357ddSVille Syrjälä *eir = I915_READ(EIR); 445278c357ddSVille Syrjälä 445378c357ddSVille Syrjälä I915_WRITE(EIR, *eir); 445478c357ddSVille Syrjälä 445578c357ddSVille Syrjälä *eir_stuck = I915_READ(EIR); 445678c357ddSVille Syrjälä if (*eir_stuck == 0) 445778c357ddSVille Syrjälä return; 445878c357ddSVille Syrjälä 445978c357ddSVille Syrjälä /* 446078c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 446178c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 446278c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 446378c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 446478c357ddSVille Syrjälä * is still pending. Also some EIR bits can't be 446578c357ddSVille Syrjälä * cleared except by handling the underlying error 446678c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 446778c357ddSVille Syrjälä * remains set. 
446878c357ddSVille Syrjälä */ 446978c357ddSVille Syrjälä emr = I915_READ(EMR); 447078c357ddSVille Syrjälä I915_WRITE(EMR, 0xffffffff); 447178c357ddSVille Syrjälä I915_WRITE(EMR, emr | *eir_stuck); 447278c357ddSVille Syrjälä } 447378c357ddSVille Syrjälä 447478c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 447578c357ddSVille Syrjälä u32 eir, u32 eir_stuck) 447678c357ddSVille Syrjälä { 447778c357ddSVille Syrjälä DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 447878c357ddSVille Syrjälä 447978c357ddSVille Syrjälä if (eir_stuck) 448078c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 448178c357ddSVille Syrjälä } 448278c357ddSVille Syrjälä 4483ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4484c2798b19SChris Wilson { 448545a83f84SDaniel Vetter struct drm_device *dev = arg; 4486fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4487af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4488c2798b19SChris Wilson 44892dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 44902dd2a883SImre Deak return IRQ_NONE; 44912dd2a883SImre Deak 44921f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 44931f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 44941f814dacSImre Deak 4495af722d28SVille Syrjälä do { 4496af722d28SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 449778c357ddSVille Syrjälä u16 eir = 0, eir_stuck = 0; 4498af722d28SVille Syrjälä u16 iir; 4499af722d28SVille Syrjälä 4500c2798b19SChris Wilson iir = I915_READ16(IIR); 4501c2798b19SChris Wilson if (iir == 0) 4502af722d28SVille Syrjälä break; 4503c2798b19SChris Wilson 4504af722d28SVille Syrjälä ret = IRQ_HANDLED; 4505c2798b19SChris Wilson 4506eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4507eb64343cSVille Syrjälä * signalled in iir */ 4508eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4509c2798b19SChris Wilson 451078c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 451178c357ddSVille Syrjälä i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 451278c357ddSVille Syrjälä 4513fd3a4024SDaniel Vetter I915_WRITE16(IIR, iir); 4514c2798b19SChris Wilson 4515c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 45163b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4517c2798b19SChris Wilson 451878c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 451978c357ddSVille Syrjälä i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4520af722d28SVille Syrjälä 4521eb64343cSVille Syrjälä i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4522af722d28SVille Syrjälä } while (0); 4523c2798b19SChris Wilson 45241f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 45251f814dacSImre Deak 45261f814dacSImre Deak return ret; 4527c2798b19SChris Wilson } 4528c2798b19SChris Wilson 45296bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev) 4530a266c7d5SChris Wilson { 4531fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4532a266c7d5SChris Wilson 453356b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 45340706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4535a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4536a266c7d5SChris Wilson } 4537a266c7d5SChris Wilson 453844d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 453944d9241eSVille Syrjälä 4540d420a50cSVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 454144d9241eSVille Syrjälä 
4542ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4543a266c7d5SChris Wilson } 4544a266c7d5SChris Wilson 4545a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 4546a266c7d5SChris Wilson { 4547fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 454838bde180SChris Wilson u32 enable_mask; 4549a266c7d5SChris Wilson 4550045cebd2SVille Syrjälä I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4551045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 455238bde180SChris Wilson 455338bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 455438bde180SChris Wilson dev_priv->irq_mask = 455538bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 455638bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 455716659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 455816659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 455938bde180SChris Wilson 456038bde180SChris Wilson enable_mask = 456138bde180SChris Wilson I915_ASLE_INTERRUPT | 456238bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 456338bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 456416659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 456538bde180SChris Wilson I915_USER_INTERRUPT; 456638bde180SChris Wilson 456756b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 4568a266c7d5SChris Wilson /* Enable in IER... */ 4569a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4570a266c7d5SChris Wilson /* and unmask in IMR */ 4571a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4572a266c7d5SChris Wilson } 4573a266c7d5SChris Wilson 4574ba7eb789SVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4575a266c7d5SChris Wilson 4576379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4577379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4578d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4579755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4580755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4581d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4582379ef82dSDaniel Vetter 4583c30bb1fdSVille Syrjälä i915_enable_asle_pipestat(dev_priv); 4584c30bb1fdSVille Syrjälä 458520afbda2SDaniel Vetter return 0; 458620afbda2SDaniel Vetter } 458720afbda2SDaniel Vetter 4588ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 4589a266c7d5SChris Wilson { 459045a83f84SDaniel Vetter struct drm_device *dev = arg; 4591fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4592af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4593a266c7d5SChris Wilson 45942dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 45952dd2a883SImre Deak return IRQ_NONE; 45962dd2a883SImre Deak 45971f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 45981f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 45991f814dacSImre Deak 460038bde180SChris Wilson do { 4601eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 460278c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4603af722d28SVille Syrjälä u32 hotplug_status = 0; 4604af722d28SVille Syrjälä u32 iir; 4605a266c7d5SChris Wilson 4606af722d28SVille Syrjälä iir = I915_READ(IIR); 4607af722d28SVille Syrjälä if (iir == 0) 4608af722d28SVille Syrjälä break; 4609af722d28SVille Syrjälä 4610af722d28SVille Syrjälä ret = IRQ_HANDLED; 4611af722d28SVille Syrjälä 4612af722d28SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv) && 4613af722d28SVille Syrjälä iir & I915_DISPLAY_PORT_INTERRUPT) 4614af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4615a266c7d5SChris Wilson 4616eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4617eb64343cSVille Syrjälä * signalled in iir */ 4618eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4619a266c7d5SChris Wilson 462078c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 462178c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 462278c357ddSVille Syrjälä 4623fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4624a266c7d5SChris Wilson 4625a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 46263b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4627a266c7d5SChris Wilson 462878c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 462978c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4630a266c7d5SChris Wilson 4631af722d28SVille Syrjälä if (hotplug_status) 4632af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4633af722d28SVille Syrjälä 4634af722d28SVille Syrjälä i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4635af722d28SVille Syrjälä } while (0); 4636a266c7d5SChris Wilson 46371f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 46381f814dacSImre Deak 4639a266c7d5SChris Wilson return ret; 4640a266c7d5SChris Wilson } 4641a266c7d5SChris Wilson 46426bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev) 4643a266c7d5SChris Wilson { 4644fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4645a266c7d5SChris Wilson 46460706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4647a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4648a266c7d5SChris Wilson 464944d9241eSVille Syrjälä 
i9xx_pipestat_irq_reset(dev_priv); 465044d9241eSVille Syrjälä 4651d420a50cSVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 465244d9241eSVille Syrjälä 4653ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4654a266c7d5SChris Wilson } 4655a266c7d5SChris Wilson 4656a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 4657a266c7d5SChris Wilson { 4658fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4659bbba0a97SChris Wilson u32 enable_mask; 4660a266c7d5SChris Wilson u32 error_mask; 4661a266c7d5SChris Wilson 4662045cebd2SVille Syrjälä /* 4663045cebd2SVille Syrjälä * Enable some error detection, note the instruction error mask 4664045cebd2SVille Syrjälä * bit is reserved, so we leave it masked. 4665045cebd2SVille Syrjälä */ 4666045cebd2SVille Syrjälä if (IS_G4X(dev_priv)) { 4667045cebd2SVille Syrjälä error_mask = ~(GM45_ERROR_PAGE_TABLE | 4668045cebd2SVille Syrjälä GM45_ERROR_MEM_PRIV | 4669045cebd2SVille Syrjälä GM45_ERROR_CP_PRIV | 4670045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4671045cebd2SVille Syrjälä } else { 4672045cebd2SVille Syrjälä error_mask = ~(I915_ERROR_PAGE_TABLE | 4673045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4674045cebd2SVille Syrjälä } 4675045cebd2SVille Syrjälä I915_WRITE(EMR, error_mask); 4676045cebd2SVille Syrjälä 4677a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. */ 4678c30bb1fdSVille Syrjälä dev_priv->irq_mask = 4679c30bb1fdSVille Syrjälä ~(I915_ASLE_INTERRUPT | 4680adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 4681bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4682bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 468378c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4684bbba0a97SChris Wilson 4685c30bb1fdSVille Syrjälä enable_mask = 4686c30bb1fdSVille Syrjälä I915_ASLE_INTERRUPT | 4687c30bb1fdSVille Syrjälä I915_DISPLAY_PORT_INTERRUPT | 4688c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4689c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 469078c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4691c30bb1fdSVille Syrjälä I915_USER_INTERRUPT; 4692bbba0a97SChris Wilson 469391d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4694bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 4695a266c7d5SChris Wilson 4696c30bb1fdSVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4697c30bb1fdSVille Syrjälä 4698b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4699b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4700d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4701755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4702755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4703755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4704d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4705a266c7d5SChris Wilson 470691d14251STvrtko Ursulin i915_enable_asle_pipestat(dev_priv); 470720afbda2SDaniel Vetter 470820afbda2SDaniel Vetter return 0; 470920afbda2SDaniel Vetter } 471020afbda2SDaniel Vetter 471191d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 471220afbda2SDaniel Vetter { 471320afbda2SDaniel Vetter u32 hotplug_en; 471420afbda2SDaniel Vetter 471567520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4716b5ea2d56SDaniel Vetter 4717adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 4718e5868a31SEgbert Eich /* enable bits are the same for all generations */ 471991d14251STvrtko Ursulin hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4720a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 4721a266c7d5SChris Wilson to generate a spurious hotplug event about three 4722a266c7d5SChris Wilson seconds later. So just do it once. 4723a266c7d5SChris Wilson */ 472491d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4725a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4726a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4727a266c7d5SChris Wilson 4728a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 47290706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(dev_priv, 4730f9e3dc78SJani Nikula HOTPLUG_INT_EN_MASK | 4731f9e3dc78SJani Nikula CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4732f9e3dc78SJani Nikula CRT_HOTPLUG_ACTIVATION_PERIOD_64, 47330706f17cSEgbert Eich hotplug_en); 4734a266c7d5SChris Wilson } 4735a266c7d5SChris Wilson 4736ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 4737a266c7d5SChris Wilson { 473845a83f84SDaniel Vetter struct drm_device *dev = arg; 4739fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4740af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4741a266c7d5SChris Wilson 47422dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 47432dd2a883SImre Deak return IRQ_NONE; 47442dd2a883SImre Deak 47451f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 47461f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 47471f814dacSImre Deak 4748af722d28SVille Syrjälä do { 4749eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 475078c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4751af722d28SVille Syrjälä u32 hotplug_status = 0; 4752af722d28SVille Syrjälä u32 iir; 47532c8ba29fSChris Wilson 4754af722d28SVille Syrjälä iir = I915_READ(IIR); 4755af722d28SVille Syrjälä if (iir == 0) 4756af722d28SVille Syrjälä break; 4757af722d28SVille Syrjälä 4758af722d28SVille Syrjälä ret = IRQ_HANDLED; 4759af722d28SVille Syrjälä 4760af722d28SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 4761af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4762a266c7d5SChris Wilson 4763eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4764eb64343cSVille Syrjälä * signalled in iir */ 4765eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4766a266c7d5SChris Wilson 476778c357ddSVille Syrjälä if (iir & 
I915_MASTER_ERROR_INTERRUPT) 476878c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 476978c357ddSVille Syrjälä 4770fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4771a266c7d5SChris Wilson 4772a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 47733b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4774af722d28SVille Syrjälä 4775a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 47763b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 4777a266c7d5SChris Wilson 477878c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 477978c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4780515ac2bbSDaniel Vetter 4781af722d28SVille Syrjälä if (hotplug_status) 4782af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4783af722d28SVille Syrjälä 4784af722d28SVille Syrjälä i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4785af722d28SVille Syrjälä } while (0); 4786a266c7d5SChris Wilson 47871f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 47881f814dacSImre Deak 4789a266c7d5SChris Wilson return ret; 4790a266c7d5SChris Wilson } 4791a266c7d5SChris Wilson 4792fca52a55SDaniel Vetter /** 4793fca52a55SDaniel Vetter * intel_irq_init - initializes irq support 4794fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4795fca52a55SDaniel Vetter * 4796fca52a55SDaniel Vetter * This function initializes all the irq support including work items, timers 4797fca52a55SDaniel Vetter * and all the vtables. It does not set up the interrupt itself though. 4798fca52a55SDaniel Vetter */ 4799b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv) 4800f71d4af4SJesse Barnes { 480191c8a326SChris Wilson struct drm_device *dev = &dev_priv->drm; 4802562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 4803cefcff8fSJoonas Lahtinen int i; 48048b2e326dSChris Wilson 480577913b39SJani Nikula intel_hpd_init_work(dev_priv); 480677913b39SJani Nikula 4807562d9baeSSagar Arun Kamble INIT_WORK(&rps->work, gen6_pm_rps_work); 4808cefcff8fSJoonas Lahtinen 4809a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4810cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4811cefcff8fSJoonas Lahtinen dev_priv->l3_parity.remap_info[i] = NULL; 48128b2e326dSChris Wilson 48134805fe82STvrtko Ursulin if (HAS_GUC_SCHED(dev_priv)) 481426705e20SSagar Arun Kamble dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 481526705e20SSagar Arun Kamble 4816a6706b45SDeepak S /* Let's track the enabled rps events */ 4817666a4537SWayne Boyer if (IS_VALLEYVIEW(dev_priv)) 48186c65a587SVille Syrjälä /* WaGsvRC0ResidencyMethod:vlv */ 4819e0e8c7cbSChris Wilson dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 482031685c25SDeepak S else 48214668f695SChris Wilson dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | 48224668f695SChris Wilson GEN6_PM_RP_DOWN_THRESHOLD | 48234668f695SChris Wilson GEN6_PM_RP_DOWN_TIMEOUT); 4824a6706b45SDeepak S 4825562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz = 0; 48261800ad25SSagar Arun Kamble 48271800ad25SSagar Arun Kamble /* 4828acf2dc22SMika Kuoppala * SNB,IVB,HSW can, while VLV,CHV may, hard hang on a looping batchbuffer 48291800ad25SSagar Arun Kamble * if GEN6_PM_UP_EI_EXPIRED is masked. 48301800ad25SSagar Arun Kamble * 48311800ad25SSagar Arun Kamble * TODO: verify if this can be reproduced on VLV,CHV.
48321800ad25SSagar Arun Kamble */ 4833bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) <= 7) 4834562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 48351800ad25SSagar Arun Kamble 4836bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8) 4837562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 48381800ad25SSagar Arun Kamble 4839*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2)) { 48404194c088SRodrigo Vivi /* Gen2 doesn't have a hardware frame counter */ 48414cdb83ecSVille Syrjälä dev->max_vblank_count = 0; 4842bca2bf2aSPandiyan, Dhinakaran } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 4843f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4844fd8f507cSVille Syrjälä dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4845391f75e2SVille Syrjälä } else { 4846391f75e2SVille Syrjälä dev->driver->get_vblank_counter = i915_get_vblank_counter; 4847391f75e2SVille Syrjälä dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4848f71d4af4SJesse Barnes } 4849f71d4af4SJesse Barnes 485021da2700SVille Syrjälä /* 485121da2700SVille Syrjälä * Opt out of the vblank disable timer on everything except gen2. 485221da2700SVille Syrjälä * Gen2 doesn't have a hardware frame counter and so depends on 485321da2700SVille Syrjälä * vblank interrupts to produce sane vblank sequence numbers. 485421da2700SVille Syrjälä */ 4855*cf819effSLucas De Marchi if (!IS_GEN(dev_priv, 2)) 485621da2700SVille Syrjälä dev->vblank_disable_immediate = true; 485721da2700SVille Syrjälä 4858262fd485SChris Wilson /* Most platforms treat the display irq block as an always-on 4859262fd485SChris Wilson * power domain. vlv/chv can disable it at runtime and need 4860262fd485SChris Wilson * special care to avoid writing any of the display block registers 4861262fd485SChris Wilson * outside of the power domain. We defer setting up the display irqs 4862262fd485SChris Wilson * in this case to the runtime pm. 4863262fd485SChris Wilson */ 4864262fd485SChris Wilson dev_priv->display_irqs_enabled = true; 4865262fd485SChris Wilson if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4866262fd485SChris Wilson dev_priv->display_irqs_enabled = false; 4867262fd485SChris Wilson 4868317eaa95SLyude dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 48699a64c650SLyude Paul /* If we have MST support, we want to avoid doing short HPD IRQ storm 48709a64c650SLyude Paul * detection, as short HPD storms will occur as a natural part of 48719a64c650SLyude Paul * sideband messaging with MST. 48729a64c650SLyude Paul * On older platforms however, IRQ storms can occur with both long and 48739a64c650SLyude Paul * short pulses, as seen on some G4x systems.
48749a64c650SLyude Paul */ 48759a64c650SLyude Paul dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); 4876317eaa95SLyude 48771bf6ad62SDaniel Vetter dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4878f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4879f71d4af4SJesse Barnes 4880b963291cSDaniel Vetter if (IS_CHERRYVIEW(dev_priv)) { 488143f328d7SVille Syrjälä dev->driver->irq_handler = cherryview_irq_handler; 48826bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = cherryview_irq_reset; 488343f328d7SVille Syrjälä dev->driver->irq_postinstall = cherryview_irq_postinstall; 48846bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = cherryview_irq_reset; 488586e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 488686e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 488743f328d7SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4888b963291cSDaniel Vetter } else if (IS_VALLEYVIEW(dev_priv)) { 48897e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 48906bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = valleyview_irq_reset; 48917e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 48926bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = valleyview_irq_reset; 489386e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 489486e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4895fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 489651951ae7SMika Kuoppala } else if (INTEL_GEN(dev_priv) >= 11) { 489751951ae7SMika Kuoppala dev->driver->irq_handler = gen11_irq_handler; 489851951ae7SMika Kuoppala dev->driver->irq_preinstall = gen11_irq_reset; 489951951ae7SMika Kuoppala dev->driver->irq_postinstall = gen11_irq_postinstall; 490051951ae7SMika Kuoppala dev->driver->irq_uninstall = gen11_irq_reset; 490151951ae7SMika Kuoppala dev->driver->enable_vblank = gen8_enable_vblank; 490251951ae7SMika Kuoppala dev->driver->disable_vblank = gen8_disable_vblank; 4903121e758eSDhinakaran Pandiyan dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4904bca2bf2aSPandiyan, Dhinakaran } else if (INTEL_GEN(dev_priv) >= 8) { 4905abd58f01SBen Widawsky dev->driver->irq_handler = gen8_irq_handler; 4906723761b8SDaniel Vetter dev->driver->irq_preinstall = gen8_irq_reset; 4907abd58f01SBen Widawsky dev->driver->irq_postinstall = gen8_irq_postinstall; 49086bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = gen8_irq_reset; 4909abd58f01SBen Widawsky dev->driver->enable_vblank = gen8_enable_vblank; 4910abd58f01SBen Widawsky dev->driver->disable_vblank = gen8_disable_vblank; 4911cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4912e0a20ad7SShashank Sharma dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 49137b22b8c4SRodrigo Vivi else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 49147b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 49156dbf30ceSVille Syrjälä dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 49166dbf30ceSVille Syrjälä else 49173a3b3c7dSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 49186e266956STvrtko Ursulin } else if (HAS_PCH_SPLIT(dev_priv)) { 4919f71d4af4SJesse Barnes dev->driver->irq_handler = ironlake_irq_handler; 4920723761b8SDaniel Vetter dev->driver->irq_preinstall = ironlake_irq_reset; 4921f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 49226bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = 
ironlake_irq_reset; 4923f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 4924f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 4925e4ce95aaSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4926f71d4af4SJesse Barnes } else { 4927*cf819effSLucas De Marchi if (IS_GEN(dev_priv, 2)) { 49286bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i8xx_irq_reset; 4929c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 4930c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 49316bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i8xx_irq_reset; 493286e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 493386e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 4934*cf819effSLucas De Marchi } else if (IS_GEN(dev_priv, 3)) { 49356bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i915_irq_reset; 4936a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 49376bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i915_irq_reset; 4938a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 493986e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 494086e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 4941c2798b19SChris Wilson } else { 49426bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i965_irq_reset; 4943a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 49446bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i965_irq_reset; 4945a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 494686e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 494786e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4948c2798b19SChris Wilson } 4949778eb334SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv)) 4950778eb334SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4951f71d4af4SJesse Barnes } 4952f71d4af4SJesse Barnes } 495320afbda2SDaniel Vetter 4954fca52a55SDaniel Vetter /** 4955cefcff8fSJoonas Lahtinen * intel_irq_fini - deinitializes IRQ support 4956cefcff8fSJoonas Lahtinen * @i915: i915 device instance 4957cefcff8fSJoonas Lahtinen * 4958cefcff8fSJoonas Lahtinen * This function deinitializes all the IRQ support. 4959cefcff8fSJoonas Lahtinen */ 4960cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915) 4961cefcff8fSJoonas Lahtinen { 4962cefcff8fSJoonas Lahtinen int i; 4963cefcff8fSJoonas Lahtinen 4964cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4965cefcff8fSJoonas Lahtinen kfree(i915->l3_parity.remap_info[i]); 4966cefcff8fSJoonas Lahtinen } 4967cefcff8fSJoonas Lahtinen 4968cefcff8fSJoonas Lahtinen /** 4969fca52a55SDaniel Vetter * intel_irq_install - enables the hardware interrupt 4970fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4971fca52a55SDaniel Vetter * 4972fca52a55SDaniel Vetter * This function enables the hardware interrupt handling, but leaves the hotplug 4973fca52a55SDaniel Vetter * handling still disabled. It is called after intel_irq_init(). 4974fca52a55SDaniel Vetter * 4975fca52a55SDaniel Vetter * In the driver load and resume code we need working interrupts in a few places 4976fca52a55SDaniel Vetter * but don't want to deal with the hassle of concurrent probe and hotplug 4977fca52a55SDaniel Vetter * workers. Hence the split into this two-stage approach. 
4978fca52a55SDaniel Vetter */ 49792aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv) 49802aeb7d3aSDaniel Vetter { 49812aeb7d3aSDaniel Vetter /* 49822aeb7d3aSDaniel Vetter * We enable some interrupt sources in our postinstall hooks, so mark 49832aeb7d3aSDaniel Vetter * interrupts as enabled _before_ actually enabling them to avoid 49842aeb7d3aSDaniel Vetter * special cases in our ordering checks. 49852aeb7d3aSDaniel Vetter */ 4986ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true; 49872aeb7d3aSDaniel Vetter 498891c8a326SChris Wilson return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 49892aeb7d3aSDaniel Vetter } 49902aeb7d3aSDaniel Vetter 4991fca52a55SDaniel Vetter /** 4992fca52a55SDaniel Vetter * intel_irq_uninstall - finalizes all irq handling 4993fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4994fca52a55SDaniel Vetter * 4995fca52a55SDaniel Vetter * This stops interrupt and hotplug handling and unregisters and frees all 4996fca52a55SDaniel Vetter * resources acquired in the init functions. 4997fca52a55SDaniel Vetter */ 49982aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv) 49992aeb7d3aSDaniel Vetter { 500091c8a326SChris Wilson drm_irq_uninstall(&dev_priv->drm); 50012aeb7d3aSDaniel Vetter intel_hpd_cancel_work(dev_priv); 5002ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false; 50032aeb7d3aSDaniel Vetter } 50042aeb7d3aSDaniel Vetter 5005fca52a55SDaniel Vetter /** 5006fca52a55SDaniel Vetter * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 5007fca52a55SDaniel Vetter * @dev_priv: i915 device instance 5008fca52a55SDaniel Vetter * 5009fca52a55SDaniel Vetter * This function is used to disable interrupts at runtime, both in the runtime 5010fca52a55SDaniel Vetter * pm and the system suspend/resume code. 5011fca52a55SDaniel Vetter */ 5012b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 5013c67a470bSPaulo Zanoni { 501491c8a326SChris Wilson dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 5015ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false; 501691c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 5017c67a470bSPaulo Zanoni } 5018c67a470bSPaulo Zanoni 5019fca52a55SDaniel Vetter /** 5020fca52a55SDaniel Vetter * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 5021fca52a55SDaniel Vetter * @dev_priv: i915 device instance 5022fca52a55SDaniel Vetter * 5023fca52a55SDaniel Vetter * This function is used to enable interrupts at runtime, both in the runtime 5024fca52a55SDaniel Vetter * pm and the system suspend/resume code. 5025fca52a55SDaniel Vetter */ 5026b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 5027c67a470bSPaulo Zanoni { 5028ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true; 502991c8a326SChris Wilson dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 503091c8a326SChris Wilson dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 5031c67a470bSPaulo Zanoni } 5032
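/*
 * Illustrative sketch only (not part of the driver): the call ordering that
 * the kerneldoc for the helpers above implies, shown as hedged pseudocode.
 * The call sites named in the comments (driver load, runtime suspend/resume,
 * unload) are assumptions about callers elsewhere in i915, not code in this
 * file, and error handling is omitted.
 *
 *	// driver load: set up vtables, work items and timers, then the IRQ line
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *
 *	// runtime pm / system suspend and resume
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 *	// driver unload: also cancels hotplug work and frees l3_parity remap_info
 *	intel_irq_uninstall(dev_priv);
 *	intel_irq_fini(dev_priv);
 */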