/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
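
/*
 * Illustrative note (not part of the upstream file): these tables map an
 * abstract HPD pin to the platform specific hotplug enable/status bits,
 * so a lookup such as
 *
 *	hpd_status_i915[HPD_PORT_B]	-> PORTB_HOTPLUG_INT_STATUS
 *
 * lets the shared hotplug code stay platform agnostic while the PCH and
 * DE variants in the surrounding tables supply the right register bits.
 */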

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)
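
/*
 * Illustrative sketch (not part of the upstream file): for a block using
 * the GEN3-style register layout, GEN3_IRQ_RESET(DE) expands to roughly
 * the following sequence - mask everything, disable all sources, then
 * clear any latched IIR bits twice in case a second event was already
 * queued:
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 */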

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid that concurrent read-modify-write
 * cycles interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * already held, it acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
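
/*
 * Illustrative example (not a real call site): a caller that only wants
 * to enable the CRT hotplug bit while leaving the port bits untouched
 * can use the mask/bits split like so:
 *
 *	i915_hotplug_interrupt_update(dev_priv,
 *				      CRT_HOTPLUG_INT_EN,
 *				      CRT_HOTPLUG_INT_EN);
 *
 * Passing the same mask with bits == 0 disables it again.
 */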

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

bool gen11_reset_one_iir(struct drm_i915_private * const i915,
			 const unsigned int bank,
			 const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
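
/*
 * Illustrative note (not part of the upstream file): the update helpers
 * above take an "update these bits" mask plus an "enabled" mask, so
 *
 *	ilk_update_gt_irq(dev_priv, mask, mask);	unmask/enable
 *	ilk_update_gt_irq(dev_priv, mask, 0);		mask/disable
 *
 * which is exactly how gen5_enable_gt_irq() and gen5_disable_gt_irq()
 * wrap it; ilk_update_display_irq() follows the same convention for
 * DEIMR.
 */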

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
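
/*
 * Illustrative sketch (not a real call site) of how the RPS helpers
 * above fit together:
 *
 *	gen6_reset_rps_interrupts(dev_priv);	// drop stale PM IIR bits
 *	gen6_enable_rps_interrupts(dev_priv);	// unmask pm_rps_events
 *	...RPS work runs off the PM interrupt...
 *	gen6_disable_rps_interrupts(dev_priv);	// mask, sync and flush rps->work
 */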

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}
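
/*
 * Illustrative pairing (hypothetical call site): the GuC helpers above
 * expect the device to be awake, hence the assert_rpm_wakelock_held()
 * checks, e.g.
 *
 *	intel_runtime_pm_get(dev_priv);
 *	gen9_enable_guc_interrupts(dev_priv);
 *	...
 *	gen9_disable_guc_interrupts(dev_priv);
 *	intel_runtime_pm_put(dev_priv);
 */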

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}
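
/*
 * Illustrative note (not part of the upstream file): in PIPESTAT the
 * status bits live in the low 16 bits and their enable bits in the high
 * 16 bits, which is why i915_pipestat_enable_mask() starts from
 * status_mask << 16. A caller enables an event with irq_lock held, e.g.
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */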

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
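
/*
 * Illustrative note (not part of the upstream file): per the timing
 * diagram above, the ctg/g4x+ frame counter already increments at the
 * start of vblank, so g4x_get_vblank_counter() is a single register
 * read. The gen3/4 counter only increments at the start of active, so
 * i915_get_vblank_counter() reconstructs the same behaviour by adding 1
 * once the pixel counter has passed the (hsync adjusted) vblank start:
 *
 *	frame = (high1 << 8) | low;
 *	return (frame + (pixel >= vbl_start)) & 0xffffff;
 */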

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, either because the timings are
 * driven from the PORT or because of issues with scanline register
 * updates. This function instead uses the framestamp (PIPE_FRMTMSTMP)
 * and current timestamp (TIMESTAMP_CTR) registers to calculate the
 * scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure both are read during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}
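
/*
 * Illustrative restatement (not part of the upstream file) of the math
 * above: the frame timestamp is latched at the start of vertical blank,
 * so
 *
 *	lines    = (scan_curr_time - scan_prev_time) * clock / (1000 * htotal)
 *	scanline = (min(lines, vtotal - 1) + vblank_start) % vtotal
 *
 * i.e. elapsed time times pixel clock gives pixels scanned out since the
 * vblank started, dividing by the line length gives lines, and the result
 * is rebased to vblank_start and wrapped by vtotal.
 */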

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem.  We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
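
/*
 * Worked example (hypothetical numbers, not from real timings): for a
 * mode with vbl_start = 1080 and vbl_end = vtotal = 1125, a scanline
 * based position of 1100 lies inside the vblank and is reported as
 * 1100 - 1125 = -25, counting up towards 0 at vbl_end, while a position
 * of 100 in the active area is reported unchanged, since
 * 100 + (1125 - 1125) = 100.
 */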
10873aa18df8SVille Syrjälä */ 10883aa18df8SVille Syrjälä if (position >= vbl_start) 10893aa18df8SVille Syrjälä position -= vbl_end; 10903aa18df8SVille Syrjälä else 10913aa18df8SVille Syrjälä position += vtotal - vbl_end; 10923aa18df8SVille Syrjälä 109391d14251STvrtko Ursulin if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 10943aa18df8SVille Syrjälä *vpos = position; 10953aa18df8SVille Syrjälä *hpos = 0; 10963aa18df8SVille Syrjälä } else { 10970af7e4dfSMario Kleiner *vpos = position / htotal; 10980af7e4dfSMario Kleiner *hpos = position - (*vpos * htotal); 10990af7e4dfSMario Kleiner } 11000af7e4dfSMario Kleiner 11011bf6ad62SDaniel Vetter return true; 11020af7e4dfSMario Kleiner } 11030af7e4dfSMario Kleiner 1104a225f079SVille Syrjälä int intel_get_crtc_scanline(struct intel_crtc *crtc) 1105a225f079SVille Syrjälä { 1106fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1107a225f079SVille Syrjälä unsigned long irqflags; 1108a225f079SVille Syrjälä int position; 1109a225f079SVille Syrjälä 1110a225f079SVille Syrjälä spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1111a225f079SVille Syrjälä position = __intel_get_crtc_scanline(crtc); 1112a225f079SVille Syrjälä spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1113a225f079SVille Syrjälä 1114a225f079SVille Syrjälä return position; 1115a225f079SVille Syrjälä } 1116a225f079SVille Syrjälä 111791d14251STvrtko Ursulin static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1118f97108d1SJesse Barnes { 1119b5b72e89SMatthew Garrett u32 busy_up, busy_down, max_avg, min_avg; 11209270388eSDaniel Vetter u8 new_delay; 11219270388eSDaniel Vetter 1122d0ecd7e2SDaniel Vetter spin_lock(&mchdev_lock); 1123f97108d1SJesse Barnes 112473edd18fSDaniel Vetter I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 112573edd18fSDaniel Vetter 112620e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay; 11279270388eSDaniel Vetter 11287648fa99SJesse Barnes I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1129b5b72e89SMatthew Garrett busy_up = I915_READ(RCPREVBSYTUPAVG); 1130b5b72e89SMatthew Garrett busy_down = I915_READ(RCPREVBSYTDNAVG); 1131f97108d1SJesse Barnes max_avg = I915_READ(RCBMAXAVG); 1132f97108d1SJesse Barnes min_avg = I915_READ(RCBMINAVG); 1133f97108d1SJesse Barnes 1134f97108d1SJesse Barnes /* Handle RCS change request from hw */ 1135b5b72e89SMatthew Garrett if (busy_up > max_avg) { 113620e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 113720e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay - 1; 113820e4d407SDaniel Vetter if (new_delay < dev_priv->ips.max_delay) 113920e4d407SDaniel Vetter new_delay = dev_priv->ips.max_delay; 1140b5b72e89SMatthew Garrett } else if (busy_down < min_avg) { 114120e4d407SDaniel Vetter if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 114220e4d407SDaniel Vetter new_delay = dev_priv->ips.cur_delay + 1; 114320e4d407SDaniel Vetter if (new_delay > dev_priv->ips.min_delay) 114420e4d407SDaniel Vetter new_delay = dev_priv->ips.min_delay; 1145f97108d1SJesse Barnes } 1146f97108d1SJesse Barnes 114791d14251STvrtko Ursulin if (ironlake_set_drps(dev_priv, new_delay)) 114820e4d407SDaniel Vetter dev_priv->ips.cur_delay = new_delay; 1149f97108d1SJesse Barnes 1150d0ecd7e2SDaniel Vetter spin_unlock(&mchdev_lock); 11519270388eSDaniel Vetter 1152f97108d1SJesse Barnes return; 1153f97108d1SJesse Barnes } 1154f97108d1SJesse Barnes 11550bc40be8STvrtko Ursulin static void notify_ring(struct intel_engine_cs *engine) 1156549f7365SChris Wilson { 
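	/*
	 * Sample the engine's current seqno and, under the breadcrumbs
	 * irq_lock, signal the fence of the request the first waiter is
	 * blocked on once that seqno has passed, then wake the waiter's
	 * task so it can re-check completion. If nobody is waiting any
	 * more, the breadcrumb interrupt is disarmed instead.
	 */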
11573f88325cSChris Wilson const u32 seqno = intel_engine_get_seqno(engine); 1158e61e0f51SChris Wilson struct i915_request *rq = NULL; 11593f88325cSChris Wilson struct task_struct *tsk = NULL; 116056299fb7SChris Wilson struct intel_wait *wait; 1161dffabc8fSTvrtko Ursulin 11623f88325cSChris Wilson if (unlikely(!engine->breadcrumbs.irq_armed)) 1163bcbd5c33SChris Wilson return; 1164bcbd5c33SChris Wilson 11653f88325cSChris Wilson rcu_read_lock(); 116656299fb7SChris Wilson 116761d3dc70SChris Wilson spin_lock(&engine->breadcrumbs.irq_lock); 116861d3dc70SChris Wilson wait = engine->breadcrumbs.irq_wait; 116956299fb7SChris Wilson if (wait) { 11703f88325cSChris Wilson /* 11713f88325cSChris Wilson * We use a callback from the dma-fence to submit 117256299fb7SChris Wilson * requests after waiting on our own requests. To 117356299fb7SChris Wilson * ensure minimum delay in queuing the next request to 117456299fb7SChris Wilson * hardware, signal the fence now rather than wait for 117556299fb7SChris Wilson * the signaler to be woken up. We still wake up the 117656299fb7SChris Wilson * waiter in order to handle the irq-seqno coherency 117756299fb7SChris Wilson * issues (we may receive the interrupt before the 117856299fb7SChris Wilson * seqno is written, see __i915_request_irq_complete()) 117956299fb7SChris Wilson * and to handle coalescing of multiple seqno updates 118056299fb7SChris Wilson * and many waiters. 118156299fb7SChris Wilson */ 11823f88325cSChris Wilson if (i915_seqno_passed(seqno, wait->seqno)) { 1183e61e0f51SChris Wilson struct i915_request *waiter = wait->request; 1184de4d2106SChris Wilson 1185e3be4079SChris Wilson if (waiter && 1186e3be4079SChris Wilson !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1187de4d2106SChris Wilson &waiter->fence.flags) && 1188de4d2106SChris Wilson intel_wait_check_request(wait, waiter)) 1189e61e0f51SChris Wilson rq = i915_request_get(waiter); 119056299fb7SChris Wilson 11913f88325cSChris Wilson tsk = wait->tsk; 11923f88325cSChris Wilson } else { 119369dc4d00SChris Wilson if (engine->irq_seqno_barrier && 119469dc4d00SChris Wilson i915_seqno_passed(seqno, wait->seqno - 1)) { 11953f88325cSChris Wilson set_bit(ENGINE_IRQ_BREADCRUMB, 11963f88325cSChris Wilson &engine->irq_posted); 11973f88325cSChris Wilson tsk = wait->tsk; 11983f88325cSChris Wilson } 11993f88325cSChris Wilson } 120078796877SChris Wilson 120178796877SChris Wilson engine->breadcrumbs.irq_count++; 120267b807a8SChris Wilson } else { 1203bcbd5c33SChris Wilson if (engine->breadcrumbs.irq_armed) 120467b807a8SChris Wilson __intel_engine_disarm_breadcrumbs(engine); 120556299fb7SChris Wilson } 120661d3dc70SChris Wilson spin_unlock(&engine->breadcrumbs.irq_lock); 120756299fb7SChris Wilson 120824754d75SChris Wilson if (rq) { 1209e3be4079SChris Wilson spin_lock(&rq->lock); 1210e3be4079SChris Wilson dma_fence_signal_locked(&rq->fence); 12114e9a8befSChris Wilson GEM_BUG_ON(!i915_request_completed(rq)); 1212e3be4079SChris Wilson spin_unlock(&rq->lock); 1213e3be4079SChris Wilson 1214e61e0f51SChris Wilson i915_request_put(rq); 121524754d75SChris Wilson } 121656299fb7SChris Wilson 12173f88325cSChris Wilson if (tsk && tsk->state & TASK_NORMAL) 12183f88325cSChris Wilson wake_up_process(tsk); 12193f88325cSChris Wilson 12203f88325cSChris Wilson rcu_read_unlock(); 12213f88325cSChris Wilson 122256299fb7SChris Wilson trace_intel_engine_notify(engine, wait); 1223549f7365SChris Wilson } 1224549f7365SChris Wilson 122543cf3bf0SChris Wilson static void vlv_c0_read(struct drm_i915_private *dev_priv, 122643cf3bf0SChris Wilson struct 
intel_rps_ei *ei) 122731685c25SDeepak S { 1228679cb6c1SMika Kuoppala ei->ktime = ktime_get_raw(); 122943cf3bf0SChris Wilson ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 123043cf3bf0SChris Wilson ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 123131685c25SDeepak S } 123231685c25SDeepak S 123343cf3bf0SChris Wilson void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 123443cf3bf0SChris Wilson { 1235562d9baeSSagar Arun Kamble memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 123643cf3bf0SChris Wilson } 123743cf3bf0SChris Wilson 123843cf3bf0SChris Wilson static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 123943cf3bf0SChris Wilson { 1240562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1241562d9baeSSagar Arun Kamble const struct intel_rps_ei *prev = &rps->ei; 124243cf3bf0SChris Wilson struct intel_rps_ei now; 124343cf3bf0SChris Wilson u32 events = 0; 124443cf3bf0SChris Wilson 1245e0e8c7cbSChris Wilson if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 124643cf3bf0SChris Wilson return 0; 124743cf3bf0SChris Wilson 124843cf3bf0SChris Wilson vlv_c0_read(dev_priv, &now); 124931685c25SDeepak S 1250679cb6c1SMika Kuoppala if (prev->ktime) { 1251e0e8c7cbSChris Wilson u64 time, c0; 1252569884e3SChris Wilson u32 render, media; 1253e0e8c7cbSChris Wilson 1254679cb6c1SMika Kuoppala time = ktime_us_delta(now.ktime, prev->ktime); 12558f68d591SChris Wilson 1256e0e8c7cbSChris Wilson time *= dev_priv->czclk_freq; 1257e0e8c7cbSChris Wilson 1258e0e8c7cbSChris Wilson /* Workload can be split between render + media, 1259e0e8c7cbSChris Wilson * e.g. SwapBuffers being blitted in X after being rendered in 1260e0e8c7cbSChris Wilson * mesa. To account for this we need to combine both engines 1261e0e8c7cbSChris Wilson * into our activity counter. 
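 * Below we therefore use the busier of the two counters,
 * max(render, media), as the overall activity figure rather than
 * their sum.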
1262e0e8c7cbSChris Wilson */ 1263569884e3SChris Wilson render = now.render_c0 - prev->render_c0; 1264569884e3SChris Wilson media = now.media_c0 - prev->media_c0; 1265569884e3SChris Wilson c0 = max(render, media); 12666b7f6aa7SMika Kuoppala c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1267e0e8c7cbSChris Wilson 1268562d9baeSSagar Arun Kamble if (c0 > time * rps->up_threshold) 1269e0e8c7cbSChris Wilson events = GEN6_PM_RP_UP_THRESHOLD; 1270562d9baeSSagar Arun Kamble else if (c0 < time * rps->down_threshold) 1271e0e8c7cbSChris Wilson events = GEN6_PM_RP_DOWN_THRESHOLD; 127231685c25SDeepak S } 127331685c25SDeepak S 1274562d9baeSSagar Arun Kamble rps->ei = now; 127543cf3bf0SChris Wilson return events; 127631685c25SDeepak S } 127731685c25SDeepak S 12784912d041SBen Widawsky static void gen6_pm_rps_work(struct work_struct *work) 12793b8d8d91SJesse Barnes { 12802d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1281562d9baeSSagar Arun Kamble container_of(work, struct drm_i915_private, gt_pm.rps.work); 1282562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 12837c0a16adSChris Wilson bool client_boost = false; 12848d3afd7dSChris Wilson int new_delay, adj, min, max; 12857c0a16adSChris Wilson u32 pm_iir = 0; 12863b8d8d91SJesse Barnes 128759cdb63dSDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 1288562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1289562d9baeSSagar Arun Kamble pm_iir = fetch_and_zero(&rps->pm_iir); 1290562d9baeSSagar Arun Kamble client_boost = atomic_read(&rps->num_waiters); 1291d4d70aa5SImre Deak } 129259cdb63dSDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 12934912d041SBen Widawsky 129460611c13SPaulo Zanoni /* Make sure we didn't queue anything we're not going to process. */ 1295a6706b45SDeepak S WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 12968d3afd7dSChris Wilson if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 12977c0a16adSChris Wilson goto out; 12983b8d8d91SJesse Barnes 12999f817501SSagar Arun Kamble mutex_lock(&dev_priv->pcu_lock); 13007b9e0ae6SChris Wilson 130143cf3bf0SChris Wilson pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 130243cf3bf0SChris Wilson 1303562d9baeSSagar Arun Kamble adj = rps->last_adj; 1304562d9baeSSagar Arun Kamble new_delay = rps->cur_freq; 1305562d9baeSSagar Arun Kamble min = rps->min_freq_softlimit; 1306562d9baeSSagar Arun Kamble max = rps->max_freq_softlimit; 13077b92c1bdSChris Wilson if (client_boost) 1308562d9baeSSagar Arun Kamble max = rps->max_freq; 1309562d9baeSSagar Arun Kamble if (client_boost && new_delay < rps->boost_freq) { 1310562d9baeSSagar Arun Kamble new_delay = rps->boost_freq; 13118d3afd7dSChris Wilson adj = 0; 13128d3afd7dSChris Wilson } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1313dd75fdc8SChris Wilson if (adj > 0) 1314dd75fdc8SChris Wilson adj *= 2; 1315edcf284bSChris Wilson else /* CHV needs even encode values */ 1316edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? 
2 : 1; 13177e79a683SSagar Arun Kamble 1318562d9baeSSagar Arun Kamble if (new_delay >= rps->max_freq_softlimit) 13197e79a683SSagar Arun Kamble adj = 0; 13207b92c1bdSChris Wilson } else if (client_boost) { 1321f5a4c67dSChris Wilson adj = 0; 1322dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1323562d9baeSSagar Arun Kamble if (rps->cur_freq > rps->efficient_freq) 1324562d9baeSSagar Arun Kamble new_delay = rps->efficient_freq; 1325562d9baeSSagar Arun Kamble else if (rps->cur_freq > rps->min_freq_softlimit) 1326562d9baeSSagar Arun Kamble new_delay = rps->min_freq_softlimit; 1327dd75fdc8SChris Wilson adj = 0; 1328dd75fdc8SChris Wilson } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1329dd75fdc8SChris Wilson if (adj < 0) 1330dd75fdc8SChris Wilson adj *= 2; 1331edcf284bSChris Wilson else /* CHV needs even encode values */ 1332edcf284bSChris Wilson adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 13337e79a683SSagar Arun Kamble 1334562d9baeSSagar Arun Kamble if (new_delay <= rps->min_freq_softlimit) 13357e79a683SSagar Arun Kamble adj = 0; 1336dd75fdc8SChris Wilson } else { /* unknown event */ 1337edcf284bSChris Wilson adj = 0; 1338dd75fdc8SChris Wilson } 13393b8d8d91SJesse Barnes 1340562d9baeSSagar Arun Kamble rps->last_adj = adj; 1341edcf284bSChris Wilson 134279249636SBen Widawsky /* sysfs frequency interfaces may have snuck in while servicing the 134379249636SBen Widawsky * interrupt 134479249636SBen Widawsky */ 1345edcf284bSChris Wilson new_delay += adj; 13468d3afd7dSChris Wilson new_delay = clamp_t(int, new_delay, min, max); 134727544369SDeepak S 13489fcee2f7SChris Wilson if (intel_set_rps(dev_priv, new_delay)) { 13499fcee2f7SChris Wilson DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); 1350562d9baeSSagar Arun Kamble rps->last_adj = 0; 13519fcee2f7SChris Wilson } 13523b8d8d91SJesse Barnes 13539f817501SSagar Arun Kamble mutex_unlock(&dev_priv->pcu_lock); 13547c0a16adSChris Wilson 13557c0a16adSChris Wilson out: 13567c0a16adSChris Wilson /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 13577c0a16adSChris Wilson spin_lock_irq(&dev_priv->irq_lock); 1358562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) 13597c0a16adSChris Wilson gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events); 13607c0a16adSChris Wilson spin_unlock_irq(&dev_priv->irq_lock); 13613b8d8d91SJesse Barnes } 13623b8d8d91SJesse Barnes 1363e3689190SBen Widawsky 1364e3689190SBen Widawsky /** 1365e3689190SBen Widawsky * ivybridge_parity_work - Workqueue called when a parity error interrupt 1366e3689190SBen Widawsky * occurred. 1367e3689190SBen Widawsky * @work: workqueue struct 1368e3689190SBen Widawsky * 1369e3689190SBen Widawsky * Doesn't actually do anything except notify userspace. As a consequence of 1370e3689190SBen Widawsky * this event, userspace should try to remap the bad rows since statistically 1371e3689190SBen Widawsky * the same row is likely to go bad again.
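 * The notification is delivered as a KOBJ_CHANGE uevent on the drm
 * device, carrying ROW=, BANK=, SUBBANK= and SLICE= environment
 * variables that identify the failing L3 location, as assembled below.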
1372e3689190SBen Widawsky */ 1373e3689190SBen Widawsky static void ivybridge_parity_work(struct work_struct *work) 1374e3689190SBen Widawsky { 13752d1013ddSJani Nikula struct drm_i915_private *dev_priv = 1376cefcff8fSJoonas Lahtinen container_of(work, typeof(*dev_priv), l3_parity.error_work); 1377e3689190SBen Widawsky u32 error_status, row, bank, subbank; 137835a85ac6SBen Widawsky char *parity_event[6]; 1379e3689190SBen Widawsky uint32_t misccpctl; 138035a85ac6SBen Widawsky uint8_t slice = 0; 1381e3689190SBen Widawsky 1382e3689190SBen Widawsky /* We must turn off DOP level clock gating to access the L3 registers. 1383e3689190SBen Widawsky * In order to prevent a get/put style interface, acquire struct mutex 1384e3689190SBen Widawsky * any time we access those registers. 1385e3689190SBen Widawsky */ 138691c8a326SChris Wilson mutex_lock(&dev_priv->drm.struct_mutex); 1387e3689190SBen Widawsky 138835a85ac6SBen Widawsky /* If we've screwed up tracking, just let the interrupt fire again */ 138935a85ac6SBen Widawsky if (WARN_ON(!dev_priv->l3_parity.which_slice)) 139035a85ac6SBen Widawsky goto out; 139135a85ac6SBen Widawsky 1392e3689190SBen Widawsky misccpctl = I915_READ(GEN7_MISCCPCTL); 1393e3689190SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1394e3689190SBen Widawsky POSTING_READ(GEN7_MISCCPCTL); 1395e3689190SBen Widawsky 139635a85ac6SBen Widawsky while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1397f0f59a00SVille Syrjälä i915_reg_t reg; 139835a85ac6SBen Widawsky 139935a85ac6SBen Widawsky slice--; 14002d1fe073SJoonas Lahtinen if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 140135a85ac6SBen Widawsky break; 140235a85ac6SBen Widawsky 140335a85ac6SBen Widawsky dev_priv->l3_parity.which_slice &= ~(1<<slice); 140435a85ac6SBen Widawsky 14056fa1c5f1SVille Syrjälä reg = GEN7_L3CDERRST1(slice); 140635a85ac6SBen Widawsky 140735a85ac6SBen Widawsky error_status = I915_READ(reg); 1408e3689190SBen Widawsky row = GEN7_PARITY_ERROR_ROW(error_status); 1409e3689190SBen Widawsky bank = GEN7_PARITY_ERROR_BANK(error_status); 1410e3689190SBen Widawsky subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1411e3689190SBen Widawsky 141235a85ac6SBen Widawsky I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 141335a85ac6SBen Widawsky POSTING_READ(reg); 1414e3689190SBen Widawsky 1415cce723edSBen Widawsky parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1416e3689190SBen Widawsky parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1417e3689190SBen Widawsky parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1418e3689190SBen Widawsky parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 141935a85ac6SBen Widawsky parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 142035a85ac6SBen Widawsky parity_event[5] = NULL; 1421e3689190SBen Widawsky 142291c8a326SChris Wilson kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1423e3689190SBen Widawsky KOBJ_CHANGE, parity_event); 1424e3689190SBen Widawsky 142535a85ac6SBen Widawsky DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 142635a85ac6SBen Widawsky slice, row, bank, subbank); 1427e3689190SBen Widawsky 142835a85ac6SBen Widawsky kfree(parity_event[4]); 1429e3689190SBen Widawsky kfree(parity_event[3]); 1430e3689190SBen Widawsky kfree(parity_event[2]); 1431e3689190SBen Widawsky kfree(parity_event[1]); 1432e3689190SBen Widawsky } 1433e3689190SBen Widawsky 143435a85ac6SBen Widawsky I915_WRITE(GEN7_MISCCPCTL, misccpctl); 143535a85ac6SBen Widawsky 143635a85ac6SBen 
Widawsky out: 143735a85ac6SBen Widawsky WARN_ON(dev_priv->l3_parity.which_slice); 14384cb21832SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 14392d1fe073SJoonas Lahtinen gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 14404cb21832SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 144135a85ac6SBen Widawsky 144291c8a326SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 144335a85ac6SBen Widawsky } 144435a85ac6SBen Widawsky 1445261e40b8SVille Syrjälä static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1446261e40b8SVille Syrjälä u32 iir) 1447e3689190SBen Widawsky { 1448261e40b8SVille Syrjälä if (!HAS_L3_DPF(dev_priv)) 1449e3689190SBen Widawsky return; 1450e3689190SBen Widawsky 1451d0ecd7e2SDaniel Vetter spin_lock(&dev_priv->irq_lock); 1452261e40b8SVille Syrjälä gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1453d0ecd7e2SDaniel Vetter spin_unlock(&dev_priv->irq_lock); 1454e3689190SBen Widawsky 1455261e40b8SVille Syrjälä iir &= GT_PARITY_ERROR(dev_priv); 145635a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 145735a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 1; 145835a85ac6SBen Widawsky 145935a85ac6SBen Widawsky if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 146035a85ac6SBen Widawsky dev_priv->l3_parity.which_slice |= 1 << 0; 146135a85ac6SBen Widawsky 1462a4da4fa4SDaniel Vetter queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1463e3689190SBen Widawsky } 1464e3689190SBen Widawsky 1465261e40b8SVille Syrjälä static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1466f1af8fc1SPaulo Zanoni u32 gt_iir) 1467f1af8fc1SPaulo Zanoni { 1468f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14693b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1470f1af8fc1SPaulo Zanoni if (gt_iir & ILK_BSD_USER_INTERRUPT) 14713b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1472f1af8fc1SPaulo Zanoni } 1473f1af8fc1SPaulo Zanoni 1474261e40b8SVille Syrjälä static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1475e7b4c6b1SDaniel Vetter u32 gt_iir) 1476e7b4c6b1SDaniel Vetter { 1477f8973c21SChris Wilson if (gt_iir & GT_RENDER_USER_INTERRUPT) 14783b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 1479cc609d5dSBen Widawsky if (gt_iir & GT_BSD_USER_INTERRUPT) 14803b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 1481cc609d5dSBen Widawsky if (gt_iir & GT_BLT_USER_INTERRUPT) 14823b3f1650SAkash Goel notify_ring(dev_priv->engine[BCS]); 1483e7b4c6b1SDaniel Vetter 1484cc609d5dSBen Widawsky if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1485cc609d5dSBen Widawsky GT_BSD_CS_ERROR_INTERRUPT | 1486aaecdf61SDaniel Vetter GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1487aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1488e3689190SBen Widawsky 1489261e40b8SVille Syrjälä if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1490261e40b8SVille Syrjälä ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1491e7b4c6b1SDaniel Vetter } 1492e7b4c6b1SDaniel Vetter 14935d3d69d5SChris Wilson static void 149451f6b0f9SChris Wilson gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 1495fbcc1a0cSNick Hoath { 149631de7350SChris Wilson bool tasklet = false; 1497f747026cSChris Wilson 1498fd8526e5SChris Wilson if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 14998ea397faSChris Wilson tasklet = true; 150031de7350SChris Wilson 150151f6b0f9SChris Wilson if (iir & GT_RENDER_USER_INTERRUPT) { 150231de7350SChris Wilson notify_ring(engine); 150393ffbe8eSMichal Wajdeczko tasklet |= 
USES_GUC_SUBMISSION(engine->i915); 150431de7350SChris Wilson } 150531de7350SChris Wilson 150631de7350SChris Wilson if (tasklet) 1507fd8526e5SChris Wilson tasklet_hi_schedule(&engine->execlists.tasklet); 1508fbcc1a0cSNick Hoath } 1509fbcc1a0cSNick Hoath 15102e4a5b25SChris Wilson static void gen8_gt_irq_ack(struct drm_i915_private *i915, 151155ef72f2SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1512abd58f01SBen Widawsky { 15132e4a5b25SChris Wilson void __iomem * const regs = i915->regs; 15142e4a5b25SChris Wilson 1515f0fd96f5SChris Wilson #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1516f0fd96f5SChris Wilson GEN8_GT_BCS_IRQ | \ 1517f0fd96f5SChris Wilson GEN8_GT_VCS1_IRQ | \ 1518f0fd96f5SChris Wilson GEN8_GT_VCS2_IRQ | \ 1519f0fd96f5SChris Wilson GEN8_GT_VECS_IRQ | \ 1520f0fd96f5SChris Wilson GEN8_GT_PM_IRQ | \ 1521f0fd96f5SChris Wilson GEN8_GT_GUC_IRQ) 1522f0fd96f5SChris Wilson 1523abd58f01SBen Widawsky if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15242e4a5b25SChris Wilson gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 15252e4a5b25SChris Wilson if (likely(gt_iir[0])) 15262e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1527abd58f01SBen Widawsky } 1528abd58f01SBen Widawsky 152985f9b5f9SZhao Yakui if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15302e4a5b25SChris Wilson gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 15312e4a5b25SChris Wilson if (likely(gt_iir[1])) 15322e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 153374cdb337SChris Wilson } 153474cdb337SChris Wilson 153526705e20SSagar Arun Kamble if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15362e4a5b25SChris Wilson gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 15372e4a5b25SChris Wilson if (likely(gt_iir[2] & (i915->pm_rps_events | 15382e4a5b25SChris Wilson i915->pm_guc_events))) 15392e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(2), 15402e4a5b25SChris Wilson gt_iir[2] & (i915->pm_rps_events | 15412e4a5b25SChris Wilson i915->pm_guc_events)); 15420961021aSBen Widawsky } 15432e4a5b25SChris Wilson 15442e4a5b25SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15452e4a5b25SChris Wilson gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 15462e4a5b25SChris Wilson if (likely(gt_iir[3])) 15472e4a5b25SChris Wilson raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 154855ef72f2SChris Wilson } 1549abd58f01SBen Widawsky } 1550abd58f01SBen Widawsky 15512e4a5b25SChris Wilson static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1552f0fd96f5SChris Wilson u32 master_ctl, u32 gt_iir[4]) 1553e30e251aSVille Syrjälä { 1554f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 15552e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[RCS], 155651f6b0f9SChris Wilson gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); 15572e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[BCS], 155851f6b0f9SChris Wilson gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); 1559e30e251aSVille Syrjälä } 1560e30e251aSVille Syrjälä 1561f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 15622e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS], 156351f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); 15642e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VCS2], 156551f6b0f9SChris Wilson gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); 1566e30e251aSVille Syrjälä } 1567e30e251aSVille Syrjälä 1568f0fd96f5SChris Wilson if (master_ctl & GEN8_GT_VECS_IRQ) { 15692e4a5b25SChris Wilson gen8_cs_irq_handler(i915->engine[VECS], 157051f6b0f9SChris Wilson gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); 
1571f0fd96f5SChris Wilson } 1572e30e251aSVille Syrjälä 1573f0fd96f5SChris Wilson if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 15742e4a5b25SChris Wilson gen6_rps_irq_handler(i915, gt_iir[2]); 15752e4a5b25SChris Wilson gen9_guc_irq_handler(i915, gt_iir[2]); 1576e30e251aSVille Syrjälä } 1577f0fd96f5SChris Wilson } 1578e30e251aSVille Syrjälä 1579121e758eSDhinakaran Pandiyan static bool gen11_port_hotplug_long_detect(enum port port, u32 val) 1580121e758eSDhinakaran Pandiyan { 1581121e758eSDhinakaran Pandiyan switch (port) { 1582121e758eSDhinakaran Pandiyan case PORT_C: 1583121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1); 1584121e758eSDhinakaran Pandiyan case PORT_D: 1585121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2); 1586121e758eSDhinakaran Pandiyan case PORT_E: 1587121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3); 1588121e758eSDhinakaran Pandiyan case PORT_F: 1589121e758eSDhinakaran Pandiyan return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4); 1590121e758eSDhinakaran Pandiyan default: 1591121e758eSDhinakaran Pandiyan return false; 1592121e758eSDhinakaran Pandiyan } 1593121e758eSDhinakaran Pandiyan } 1594121e758eSDhinakaran Pandiyan 159563c88d22SImre Deak static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 159663c88d22SImre Deak { 159763c88d22SImre Deak switch (port) { 159863c88d22SImre Deak case PORT_A: 1599195baa06SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 160063c88d22SImre Deak case PORT_B: 160163c88d22SImre Deak return val & PORTB_HOTPLUG_LONG_DETECT; 160263c88d22SImre Deak case PORT_C: 160363c88d22SImre Deak return val & PORTC_HOTPLUG_LONG_DETECT; 160463c88d22SImre Deak default: 160563c88d22SImre Deak return false; 160663c88d22SImre Deak } 160763c88d22SImre Deak } 160863c88d22SImre Deak 160931604222SAnusha Srivatsa static bool icp_ddi_port_hotplug_long_detect(enum port port, u32 val) 161031604222SAnusha Srivatsa { 161131604222SAnusha Srivatsa switch (port) { 161231604222SAnusha Srivatsa case PORT_A: 161331604222SAnusha Srivatsa return val & ICP_DDIA_HPD_LONG_DETECT; 161431604222SAnusha Srivatsa case PORT_B: 161531604222SAnusha Srivatsa return val & ICP_DDIB_HPD_LONG_DETECT; 161631604222SAnusha Srivatsa default: 161731604222SAnusha Srivatsa return false; 161831604222SAnusha Srivatsa } 161931604222SAnusha Srivatsa } 162031604222SAnusha Srivatsa 162131604222SAnusha Srivatsa static bool icp_tc_port_hotplug_long_detect(enum port port, u32 val) 162231604222SAnusha Srivatsa { 162331604222SAnusha Srivatsa switch (port) { 162431604222SAnusha Srivatsa case PORT_C: 162531604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1); 162631604222SAnusha Srivatsa case PORT_D: 162731604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2); 162831604222SAnusha Srivatsa case PORT_E: 162931604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3); 163031604222SAnusha Srivatsa case PORT_F: 163131604222SAnusha Srivatsa return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4); 163231604222SAnusha Srivatsa default: 163331604222SAnusha Srivatsa return false; 163431604222SAnusha Srivatsa } 163531604222SAnusha Srivatsa } 163631604222SAnusha Srivatsa 16376dbf30ceSVille Syrjälä static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 16386dbf30ceSVille Syrjälä { 16396dbf30ceSVille Syrjälä switch (port) { 16406dbf30ceSVille Syrjälä case PORT_E: 16416dbf30ceSVille Syrjälä return val & PORTE_HOTPLUG_LONG_DETECT; 16426dbf30ceSVille Syrjälä default: 
16436dbf30ceSVille Syrjälä return false; 16446dbf30ceSVille Syrjälä } 16456dbf30ceSVille Syrjälä } 16466dbf30ceSVille Syrjälä 164774c0b395SVille Syrjälä static bool spt_port_hotplug_long_detect(enum port port, u32 val) 164874c0b395SVille Syrjälä { 164974c0b395SVille Syrjälä switch (port) { 165074c0b395SVille Syrjälä case PORT_A: 165174c0b395SVille Syrjälä return val & PORTA_HOTPLUG_LONG_DETECT; 165274c0b395SVille Syrjälä case PORT_B: 165374c0b395SVille Syrjälä return val & PORTB_HOTPLUG_LONG_DETECT; 165474c0b395SVille Syrjälä case PORT_C: 165574c0b395SVille Syrjälä return val & PORTC_HOTPLUG_LONG_DETECT; 165674c0b395SVille Syrjälä case PORT_D: 165774c0b395SVille Syrjälä return val & PORTD_HOTPLUG_LONG_DETECT; 165874c0b395SVille Syrjälä default: 165974c0b395SVille Syrjälä return false; 166074c0b395SVille Syrjälä } 166174c0b395SVille Syrjälä } 166274c0b395SVille Syrjälä 1663e4ce95aaSVille Syrjälä static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1664e4ce95aaSVille Syrjälä { 1665e4ce95aaSVille Syrjälä switch (port) { 1666e4ce95aaSVille Syrjälä case PORT_A: 1667e4ce95aaSVille Syrjälä return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1668e4ce95aaSVille Syrjälä default: 1669e4ce95aaSVille Syrjälä return false; 1670e4ce95aaSVille Syrjälä } 1671e4ce95aaSVille Syrjälä } 1672e4ce95aaSVille Syrjälä 1673676574dfSJani Nikula static bool pch_port_hotplug_long_detect(enum port port, u32 val) 167413cf5504SDave Airlie { 167513cf5504SDave Airlie switch (port) { 167613cf5504SDave Airlie case PORT_B: 1677676574dfSJani Nikula return val & PORTB_HOTPLUG_LONG_DETECT; 167813cf5504SDave Airlie case PORT_C: 1679676574dfSJani Nikula return val & PORTC_HOTPLUG_LONG_DETECT; 168013cf5504SDave Airlie case PORT_D: 1681676574dfSJani Nikula return val & PORTD_HOTPLUG_LONG_DETECT; 1682676574dfSJani Nikula default: 1683676574dfSJani Nikula return false; 168413cf5504SDave Airlie } 168513cf5504SDave Airlie } 168613cf5504SDave Airlie 1687676574dfSJani Nikula static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 168813cf5504SDave Airlie { 168913cf5504SDave Airlie switch (port) { 169013cf5504SDave Airlie case PORT_B: 1691676574dfSJani Nikula return val & PORTB_HOTPLUG_INT_LONG_PULSE; 169213cf5504SDave Airlie case PORT_C: 1693676574dfSJani Nikula return val & PORTC_HOTPLUG_INT_LONG_PULSE; 169413cf5504SDave Airlie case PORT_D: 1695676574dfSJani Nikula return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1696676574dfSJani Nikula default: 1697676574dfSJani Nikula return false; 169813cf5504SDave Airlie } 169913cf5504SDave Airlie } 170013cf5504SDave Airlie 170142db67d6SVille Syrjälä /* 170242db67d6SVille Syrjälä * Get a bit mask of pins that have triggered, and which ones may be long. 170342db67d6SVille Syrjälä * This can be called multiple times with the same masks to accumulate 170442db67d6SVille Syrjälä * hotplug detection results from several registers. 170542db67d6SVille Syrjälä * 170642db67d6SVille Syrjälä * Note that the caller is expected to zero out the masks initially. 
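 * pin_mask collects every pin whose trigger bit is set; long_mask
 * collects the subset of those pins for which long_pulse_detect()
 * reports a long pulse (typically a plug or unplug event rather than a
 * short HPD pulse).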
170742db67d6SVille Syrjälä */ 1708cf53902fSRodrigo Vivi static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, 1709cf53902fSRodrigo Vivi u32 *pin_mask, u32 *long_mask, 17108c841e57SJani Nikula u32 hotplug_trigger, u32 dig_hotplug_reg, 1711fd63e2a9SImre Deak const u32 hpd[HPD_NUM_PINS], 1712fd63e2a9SImre Deak bool long_pulse_detect(enum port port, u32 val)) 1713676574dfSJani Nikula { 17148c841e57SJani Nikula enum port port; 1715*e9be2850SVille Syrjälä enum hpd_pin pin; 1716676574dfSJani Nikula 1717*e9be2850SVille Syrjälä for_each_hpd_pin(pin) { 1718*e9be2850SVille Syrjälä if ((hpd[pin] & hotplug_trigger) == 0) 17198c841e57SJani Nikula continue; 17208c841e57SJani Nikula 1721*e9be2850SVille Syrjälä *pin_mask |= BIT(pin); 1722676574dfSJani Nikula 1723*e9be2850SVille Syrjälä port = intel_hpd_pin_to_port(dev_priv, pin); 1724256cfddeSRodrigo Vivi if (port == PORT_NONE) 1725cc24fcdcSImre Deak continue; 1726cc24fcdcSImre Deak 1727fd63e2a9SImre Deak if (long_pulse_detect(port, dig_hotplug_reg)) 1728*e9be2850SVille Syrjälä *long_mask |= BIT(pin); 1729676574dfSJani Nikula } 1730676574dfSJani Nikula 1731676574dfSJani Nikula DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1732676574dfSJani Nikula hotplug_trigger, dig_hotplug_reg, *pin_mask); 1733676574dfSJani Nikula 1734676574dfSJani Nikula } 1735676574dfSJani Nikula 173691d14251STvrtko Ursulin static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1737515ac2bbSDaniel Vetter { 173828c70f16SDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1739515ac2bbSDaniel Vetter } 1740515ac2bbSDaniel Vetter 174191d14251STvrtko Ursulin static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1742ce99c256SDaniel Vetter { 17439ee32feaSDaniel Vetter wake_up_all(&dev_priv->gmbus_wait_queue); 1744ce99c256SDaniel Vetter } 1745ce99c256SDaniel Vetter 17468bf1e9f1SShuang He #if defined(CONFIG_DEBUG_FS) 174791d14251STvrtko Ursulin static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 174891d14251STvrtko Ursulin enum pipe pipe, 1749eba94eb9SDaniel Vetter uint32_t crc0, uint32_t crc1, 1750eba94eb9SDaniel Vetter uint32_t crc2, uint32_t crc3, 17518bc5e955SDaniel Vetter uint32_t crc4) 17528bf1e9f1SShuang He { 17538bf1e9f1SShuang He struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 17548c6b709dSTomeu Vizoso struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 17558c6b709dSTomeu Vizoso uint32_t crcs[5]; 1756b2c88f5bSDamien Lespiau 1757d538bbdfSDamien Lespiau spin_lock(&pipe_crc->lock); 17588c6b709dSTomeu Vizoso /* 17598c6b709dSTomeu Vizoso * For some not yet identified reason, the first CRC is 17608c6b709dSTomeu Vizoso * bonkers. So let's just wait for the next vblank and read 17618c6b709dSTomeu Vizoso * out the buggy result. 17628c6b709dSTomeu Vizoso * 1763163e8aecSRodrigo Vivi * On GEN8+ sometimes the second CRC is bonkers as well, so 17648c6b709dSTomeu Vizoso * don't trust that one either. 
17658c6b709dSTomeu Vizoso */ 1766033b7a23SMaarten Lankhorst if (pipe_crc->skipped <= 0 || 1767163e8aecSRodrigo Vivi (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 17688c6b709dSTomeu Vizoso pipe_crc->skipped++; 17698c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17708c6b709dSTomeu Vizoso return; 17718c6b709dSTomeu Vizoso } 17728c6b709dSTomeu Vizoso spin_unlock(&pipe_crc->lock); 17736cc42152SMaarten Lankhorst 17748c6b709dSTomeu Vizoso crcs[0] = crc0; 17758c6b709dSTomeu Vizoso crcs[1] = crc1; 17768c6b709dSTomeu Vizoso crcs[2] = crc2; 17778c6b709dSTomeu Vizoso crcs[3] = crc3; 17788c6b709dSTomeu Vizoso crcs[4] = crc4; 1779246ee524STomeu Vizoso drm_crtc_add_crc_entry(&crtc->base, true, 1780ca814b25SDaniel Vetter drm_crtc_accurate_vblank_count(&crtc->base), 1781246ee524STomeu Vizoso crcs); 17828c6b709dSTomeu Vizoso } 1783277de95eSDaniel Vetter #else 1784277de95eSDaniel Vetter static inline void 178591d14251STvrtko Ursulin display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 178691d14251STvrtko Ursulin enum pipe pipe, 1787277de95eSDaniel Vetter uint32_t crc0, uint32_t crc1, 1788277de95eSDaniel Vetter uint32_t crc2, uint32_t crc3, 1789277de95eSDaniel Vetter uint32_t crc4) {} 1790277de95eSDaniel Vetter #endif 1791eba94eb9SDaniel Vetter 1792277de95eSDaniel Vetter 179391d14251STvrtko Ursulin static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 179491d14251STvrtko Ursulin enum pipe pipe) 17955a69b89fSDaniel Vetter { 179691d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 17975a69b89fSDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 17985a69b89fSDaniel Vetter 0, 0, 0, 0); 17995a69b89fSDaniel Vetter } 18005a69b89fSDaniel Vetter 180191d14251STvrtko Ursulin static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 180291d14251STvrtko Ursulin enum pipe pipe) 1803eba94eb9SDaniel Vetter { 180491d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 1805eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1806eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1807eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1808eba94eb9SDaniel Vetter I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 18098bc5e955SDaniel Vetter I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1810eba94eb9SDaniel Vetter } 18115b3a856bSDaniel Vetter 181291d14251STvrtko Ursulin static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 181391d14251STvrtko Ursulin enum pipe pipe) 18145b3a856bSDaniel Vetter { 18150b5c5ed0SDaniel Vetter uint32_t res1, res2; 18160b5c5ed0SDaniel Vetter 181791d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 3) 18180b5c5ed0SDaniel Vetter res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 18190b5c5ed0SDaniel Vetter else 18200b5c5ed0SDaniel Vetter res1 = 0; 18210b5c5ed0SDaniel Vetter 182291d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 18230b5c5ed0SDaniel Vetter res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 18240b5c5ed0SDaniel Vetter else 18250b5c5ed0SDaniel Vetter res2 = 0; 18265b3a856bSDaniel Vetter 182791d14251STvrtko Ursulin display_pipe_crc_irq_handler(dev_priv, pipe, 18280b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_RED(pipe)), 18290b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_GREEN(pipe)), 18300b5c5ed0SDaniel Vetter I915_READ(PIPE_CRC_RES_BLUE(pipe)), 18310b5c5ed0SDaniel Vetter res1, res2); 18325b3a856bSDaniel Vetter } 18338bf1e9f1SShuang He 18341403c0d4SPaulo Zanoni /* The RPS events need forcewake, so we add them to a work queue and mask their 18351403c0d4SPaulo Zanoni * IMR bits until 
the work is done. Other interrupts can be processed without 18361403c0d4SPaulo Zanoni * the work queue. */ 18371403c0d4SPaulo Zanoni static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1838baf02a1fSBen Widawsky { 1839562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 1840562d9baeSSagar Arun Kamble 1841a6706b45SDeepak S if (pm_iir & dev_priv->pm_rps_events) { 184259cdb63dSDaniel Vetter spin_lock(&dev_priv->irq_lock); 1843f4e9af4fSAkash Goel gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1844562d9baeSSagar Arun Kamble if (rps->interrupts_enabled) { 1845562d9baeSSagar Arun Kamble rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1846562d9baeSSagar Arun Kamble schedule_work(&rps->work); 184741a05a3aSDaniel Vetter } 1848d4d70aa5SImre Deak spin_unlock(&dev_priv->irq_lock); 1849d4d70aa5SImre Deak } 1850baf02a1fSBen Widawsky 1851bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8) 1852c9a9a268SImre Deak return; 1853c9a9a268SImre Deak 18542d1fe073SJoonas Lahtinen if (HAS_VEBOX(dev_priv)) { 185512638c57SBen Widawsky if (pm_iir & PM_VEBOX_USER_INTERRUPT) 18563b3f1650SAkash Goel notify_ring(dev_priv->engine[VECS]); 185712638c57SBen Widawsky 1858aaecdf61SDaniel Vetter if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1859aaecdf61SDaniel Vetter DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 186012638c57SBen Widawsky } 18611403c0d4SPaulo Zanoni } 1862baf02a1fSBen Widawsky 186326705e20SSagar Arun Kamble static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 186426705e20SSagar Arun Kamble { 186593bf8096SMichal Wajdeczko if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 186693bf8096SMichal Wajdeczko intel_guc_to_host_event_handler(&dev_priv->guc); 186726705e20SSagar Arun Kamble } 186826705e20SSagar Arun Kamble 186944d9241eSVille Syrjälä static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 187044d9241eSVille Syrjälä { 187144d9241eSVille Syrjälä enum pipe pipe; 187244d9241eSVille Syrjälä 187344d9241eSVille Syrjälä for_each_pipe(dev_priv, pipe) { 187444d9241eSVille Syrjälä I915_WRITE(PIPESTAT(pipe), 187544d9241eSVille Syrjälä PIPESTAT_INT_STATUS_MASK | 187644d9241eSVille Syrjälä PIPE_FIFO_UNDERRUN_STATUS); 187744d9241eSVille Syrjälä 187844d9241eSVille Syrjälä dev_priv->pipestat_irq_mask[pipe] = 0; 187944d9241eSVille Syrjälä } 188044d9241eSVille Syrjälä } 188144d9241eSVille Syrjälä 1882eb64343cSVille Syrjälä static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 188391d14251STvrtko Ursulin u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 18847e231dbeSJesse Barnes { 18857e231dbeSJesse Barnes int pipe; 18867e231dbeSJesse Barnes 188758ead0d7SImre Deak spin_lock(&dev_priv->irq_lock); 18881ca993d2SVille Syrjälä 18891ca993d2SVille Syrjälä if (!dev_priv->display_irqs_enabled) { 18901ca993d2SVille Syrjälä spin_unlock(&dev_priv->irq_lock); 18911ca993d2SVille Syrjälä return; 18921ca993d2SVille Syrjälä } 18931ca993d2SVille Syrjälä 1894055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 1895f0f59a00SVille Syrjälä i915_reg_t reg; 18966b12ca56SVille Syrjälä u32 status_mask, enable_mask, iir_bit = 0; 189791d181ddSImre Deak 1898bbb5eebfSDaniel Vetter /* 1899bbb5eebfSDaniel Vetter * PIPESTAT bits get signalled even when the interrupt is 1900bbb5eebfSDaniel Vetter * disabled with the mask bits, and some of the status bits do 1901bbb5eebfSDaniel Vetter * not generate interrupts at all (like the underrun bit). 
Hence 1902bbb5eebfSDaniel Vetter * we need to be careful that we only handle what we want to 1903bbb5eebfSDaniel Vetter * handle. 1904bbb5eebfSDaniel Vetter */ 19050f239f4cSDaniel Vetter 19060f239f4cSDaniel Vetter /* fifo underruns are filterered in the underrun handler. */ 19076b12ca56SVille Syrjälä status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1908bbb5eebfSDaniel Vetter 1909bbb5eebfSDaniel Vetter switch (pipe) { 1910bbb5eebfSDaniel Vetter case PIPE_A: 1911bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1912bbb5eebfSDaniel Vetter break; 1913bbb5eebfSDaniel Vetter case PIPE_B: 1914bbb5eebfSDaniel Vetter iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1915bbb5eebfSDaniel Vetter break; 19163278f67fSVille Syrjälä case PIPE_C: 19173278f67fSVille Syrjälä iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 19183278f67fSVille Syrjälä break; 1919bbb5eebfSDaniel Vetter } 1920bbb5eebfSDaniel Vetter if (iir & iir_bit) 19216b12ca56SVille Syrjälä status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1922bbb5eebfSDaniel Vetter 19236b12ca56SVille Syrjälä if (!status_mask) 192491d181ddSImre Deak continue; 192591d181ddSImre Deak 192691d181ddSImre Deak reg = PIPESTAT(pipe); 19276b12ca56SVille Syrjälä pipe_stats[pipe] = I915_READ(reg) & status_mask; 19286b12ca56SVille Syrjälä enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 19297e231dbeSJesse Barnes 19307e231dbeSJesse Barnes /* 19317e231dbeSJesse Barnes * Clear the PIPE*STAT regs before the IIR 1932132c27c9SVille Syrjälä * 1933132c27c9SVille Syrjälä * Toggle the enable bits to make sure we get an 1934132c27c9SVille Syrjälä * edge in the ISR pipe event bit if we don't clear 1935132c27c9SVille Syrjälä * all the enabled status bits. Otherwise the edge 1936132c27c9SVille Syrjälä * triggered IIR on i965/g4x wouldn't notice that 1937132c27c9SVille Syrjälä * an interrupt is still pending. 
19387e231dbeSJesse Barnes */ 1939132c27c9SVille Syrjälä if (pipe_stats[pipe]) { 1940132c27c9SVille Syrjälä I915_WRITE(reg, pipe_stats[pipe]); 1941132c27c9SVille Syrjälä I915_WRITE(reg, enable_mask); 1942132c27c9SVille Syrjälä } 19437e231dbeSJesse Barnes } 194458ead0d7SImre Deak spin_unlock(&dev_priv->irq_lock); 19452ecb8ca4SVille Syrjälä } 19462ecb8ca4SVille Syrjälä 1947eb64343cSVille Syrjälä static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1948eb64343cSVille Syrjälä u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1949eb64343cSVille Syrjälä { 1950eb64343cSVille Syrjälä enum pipe pipe; 1951eb64343cSVille Syrjälä 1952eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1953eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1954eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1955eb64343cSVille Syrjälä 1956eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1957eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1958eb64343cSVille Syrjälä 1959eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1960eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1961eb64343cSVille Syrjälä } 1962eb64343cSVille Syrjälä } 1963eb64343cSVille Syrjälä 1964eb64343cSVille Syrjälä static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1965eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1966eb64343cSVille Syrjälä { 1967eb64343cSVille Syrjälä bool blc_event = false; 1968eb64343cSVille Syrjälä enum pipe pipe; 1969eb64343cSVille Syrjälä 1970eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1971eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1972eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1973eb64343cSVille Syrjälä 1974eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1975eb64343cSVille Syrjälä blc_event = true; 1976eb64343cSVille Syrjälä 1977eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1978eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1979eb64343cSVille Syrjälä 1980eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1981eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1982eb64343cSVille Syrjälä } 1983eb64343cSVille Syrjälä 1984eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1985eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 1986eb64343cSVille Syrjälä } 1987eb64343cSVille Syrjälä 1988eb64343cSVille Syrjälä static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1989eb64343cSVille Syrjälä u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1990eb64343cSVille Syrjälä { 1991eb64343cSVille Syrjälä bool blc_event = false; 1992eb64343cSVille Syrjälä enum pipe pipe; 1993eb64343cSVille Syrjälä 1994eb64343cSVille Syrjälä for_each_pipe(dev_priv, pipe) { 1995eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1996eb64343cSVille Syrjälä drm_handle_vblank(&dev_priv->drm, pipe); 1997eb64343cSVille Syrjälä 1998eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1999eb64343cSVille Syrjälä blc_event = true; 2000eb64343cSVille Syrjälä 2001eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2002eb64343cSVille Syrjälä i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2003eb64343cSVille Syrjälä 2004eb64343cSVille Syrjälä if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 
2005eb64343cSVille Syrjälä intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2006eb64343cSVille Syrjälä } 2007eb64343cSVille Syrjälä 2008eb64343cSVille Syrjälä if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2009eb64343cSVille Syrjälä intel_opregion_asle_intr(dev_priv); 2010eb64343cSVille Syrjälä 2011eb64343cSVille Syrjälä if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2012eb64343cSVille Syrjälä gmbus_irq_handler(dev_priv); 2013eb64343cSVille Syrjälä } 2014eb64343cSVille Syrjälä 201591d14251STvrtko Ursulin static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 20162ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES]) 20172ecb8ca4SVille Syrjälä { 20182ecb8ca4SVille Syrjälä enum pipe pipe; 20197e231dbeSJesse Barnes 2020055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2021fd3a4024SDaniel Vetter if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2022fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 20234356d586SDaniel Vetter 20244356d586SDaniel Vetter if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 202591d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 20262d9d2b0bSVille Syrjälä 20271f7247c0SDaniel Vetter if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 20281f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 202931acc7f5SJesse Barnes } 203031acc7f5SJesse Barnes 2031c1874ed7SImre Deak if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 203291d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2033c1874ed7SImre Deak } 2034c1874ed7SImre Deak 20351ae3c34cSVille Syrjälä static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 203616c6c56bSVille Syrjälä { 20370ba7c51aSVille Syrjälä u32 hotplug_status = 0, hotplug_status_mask; 20380ba7c51aSVille Syrjälä int i; 203916c6c56bSVille Syrjälä 20400ba7c51aSVille Syrjälä if (IS_G4X(dev_priv) || 20410ba7c51aSVille Syrjälä IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 20420ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 20430ba7c51aSVille Syrjälä DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 20440ba7c51aSVille Syrjälä else 20450ba7c51aSVille Syrjälä hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 20460ba7c51aSVille Syrjälä 20470ba7c51aSVille Syrjälä /* 20480ba7c51aSVille Syrjälä * We absolutely have to clear all the pending interrupt 20490ba7c51aSVille Syrjälä * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port 20500ba7c51aSVille Syrjälä * interrupt bit won't have an edge, and the i965/g4x 20510ba7c51aSVille Syrjälä * edge triggered IIR will not notice that an interrupt 20520ba7c51aSVille Syrjälä * is still pending. 
We can't use PORT_HOTPLUG_EN to 20530ba7c51aSVille Syrjälä * guarantee the edge as the act of toggling the enable 20540ba7c51aSVille Syrjälä * bits can itself generate a new hotplug interrupt :( 20550ba7c51aSVille Syrjälä */ 20560ba7c51aSVille Syrjälä for (i = 0; i < 10; i++) { 20570ba7c51aSVille Syrjälä u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 20580ba7c51aSVille Syrjälä 20590ba7c51aSVille Syrjälä if (tmp == 0) 20600ba7c51aSVille Syrjälä return hotplug_status; 20610ba7c51aSVille Syrjälä 20620ba7c51aSVille Syrjälä hotplug_status |= tmp; 20633ff60f89SOscar Mateo I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 20640ba7c51aSVille Syrjälä } 20650ba7c51aSVille Syrjälä 20660ba7c51aSVille Syrjälä WARN_ONCE(1, 20670ba7c51aSVille Syrjälä "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 20680ba7c51aSVille Syrjälä I915_READ(PORT_HOTPLUG_STAT)); 20691ae3c34cSVille Syrjälä 20701ae3c34cSVille Syrjälä return hotplug_status; 20711ae3c34cSVille Syrjälä } 20721ae3c34cSVille Syrjälä 207391d14251STvrtko Ursulin static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 20741ae3c34cSVille Syrjälä u32 hotplug_status) 20751ae3c34cSVille Syrjälä { 20761ae3c34cSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 20773ff60f89SOscar Mateo 207891d14251STvrtko Ursulin if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 207991d14251STvrtko Ursulin IS_CHERRYVIEW(dev_priv)) { 208016c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 208116c6c56bSVille Syrjälä 208258f2cf24SVille Syrjälä if (hotplug_trigger) { 2083cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2084cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2085cf53902fSRodrigo Vivi hpd_status_g4x, 2086fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 208758f2cf24SVille Syrjälä 208891d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 208958f2cf24SVille Syrjälä } 2090369712e8SJani Nikula 2091369712e8SJani Nikula if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 209291d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 209316c6c56bSVille Syrjälä } else { 209416c6c56bSVille Syrjälä u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 209516c6c56bSVille Syrjälä 209658f2cf24SVille Syrjälä if (hotplug_trigger) { 2097cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2098cf53902fSRodrigo Vivi hotplug_trigger, hotplug_trigger, 2099cf53902fSRodrigo Vivi hpd_status_i915, 2100fd63e2a9SImre Deak i9xx_port_hotplug_long_detect); 210191d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 210216c6c56bSVille Syrjälä } 21033ff60f89SOscar Mateo } 210458f2cf24SVille Syrjälä } 210516c6c56bSVille Syrjälä 2106c1874ed7SImre Deak static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2107c1874ed7SImre Deak { 210845a83f84SDaniel Vetter struct drm_device *dev = arg; 2109fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2110c1874ed7SImre Deak irqreturn_t ret = IRQ_NONE; 2111c1874ed7SImre Deak 21122dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21132dd2a883SImre Deak return IRQ_NONE; 21142dd2a883SImre Deak 21151f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 21161f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 21171f814dacSImre Deak 21181e1cace9SVille Syrjälä do { 21196e814800SVille Syrjälä u32 iir, gt_iir, pm_iir; 21202ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 21211ae3c34cSVille Syrjälä u32 hotplug_status = 0; 
2122a5e485a9SVille Syrjälä u32 ier = 0; 21233ff60f89SOscar Mateo 2124c1874ed7SImre Deak gt_iir = I915_READ(GTIIR); 2125c1874ed7SImre Deak pm_iir = I915_READ(GEN6_PMIIR); 21263ff60f89SOscar Mateo iir = I915_READ(VLV_IIR); 2127c1874ed7SImre Deak 2128c1874ed7SImre Deak if (gt_iir == 0 && pm_iir == 0 && iir == 0) 21291e1cace9SVille Syrjälä break; 2130c1874ed7SImre Deak 2131c1874ed7SImre Deak ret = IRQ_HANDLED; 2132c1874ed7SImre Deak 2133a5e485a9SVille Syrjälä /* 2134a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2135a5e485a9SVille Syrjälä * 2136a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2137a5e485a9SVille Syrjälä * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2138a5e485a9SVille Syrjälä * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2139a5e485a9SVille Syrjälä * 2140a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2141a5e485a9SVille Syrjälä * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2142a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2143a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2144a5e485a9SVille Syrjälä * bits this time around. 2145a5e485a9SVille Syrjälä */ 21464a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 2147a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2148a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 21494a0a0202SVille Syrjälä 21504a0a0202SVille Syrjälä if (gt_iir) 21514a0a0202SVille Syrjälä I915_WRITE(GTIIR, gt_iir); 21524a0a0202SVille Syrjälä if (pm_iir) 21534a0a0202SVille Syrjälä I915_WRITE(GEN6_PMIIR, pm_iir); 21544a0a0202SVille Syrjälä 21557ce4d1f2SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 21561ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 21577ce4d1f2SVille Syrjälä 21583ff60f89SOscar Mateo /* Call regardless, as some status bits might not be 21593ff60f89SOscar Mateo * signalled in iir */ 2160eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 21617ce4d1f2SVille Syrjälä 2162eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2163eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT)) 2164eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2165eef57324SJerome Anand 21667ce4d1f2SVille Syrjälä /* 21677ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 21687ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
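 * (Clearing VLV_IIR any earlier would be futile: as long as the
 * underlying PIPESTAT / PORT_HOTPLUG_STAT bits are still asserted,
 * the corresponding VLV_IIR bits simply remain set.)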
21697ce4d1f2SVille Syrjälä */ 21707ce4d1f2SVille Syrjälä if (iir) 21717ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 21724a0a0202SVille Syrjälä 2173a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 21744a0a0202SVille Syrjälä I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 21751ae3c34cSVille Syrjälä 217652894874SVille Syrjälä if (gt_iir) 2177261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 217852894874SVille Syrjälä if (pm_iir) 217952894874SVille Syrjälä gen6_rps_irq_handler(dev_priv, pm_iir); 218052894874SVille Syrjälä 21811ae3c34cSVille Syrjälä if (hotplug_status) 218291d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 21832ecb8ca4SVille Syrjälä 218491d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 21851e1cace9SVille Syrjälä } while (0); 21867e231dbeSJesse Barnes 21871f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 21881f814dacSImre Deak 21897e231dbeSJesse Barnes return ret; 21907e231dbeSJesse Barnes } 21917e231dbeSJesse Barnes 219243f328d7SVille Syrjälä static irqreturn_t cherryview_irq_handler(int irq, void *arg) 219343f328d7SVille Syrjälä { 219445a83f84SDaniel Vetter struct drm_device *dev = arg; 2195fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 219643f328d7SVille Syrjälä irqreturn_t ret = IRQ_NONE; 219743f328d7SVille Syrjälä 21982dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 21992dd2a883SImre Deak return IRQ_NONE; 22002dd2a883SImre Deak 22011f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 22021f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 22031f814dacSImre Deak 2204579de73bSChris Wilson do { 22056e814800SVille Syrjälä u32 master_ctl, iir; 22062ecb8ca4SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 22071ae3c34cSVille Syrjälä u32 hotplug_status = 0; 2208f0fd96f5SChris Wilson u32 gt_iir[4]; 2209a5e485a9SVille Syrjälä u32 ier = 0; 2210a5e485a9SVille Syrjälä 22118e5fd599SVille Syrjälä master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 22123278f67fSVille Syrjälä iir = I915_READ(VLV_IIR); 22133278f67fSVille Syrjälä 22143278f67fSVille Syrjälä if (master_ctl == 0 && iir == 0) 22158e5fd599SVille Syrjälä break; 221643f328d7SVille Syrjälä 221727b6c122SOscar Mateo ret = IRQ_HANDLED; 221827b6c122SOscar Mateo 2219a5e485a9SVille Syrjälä /* 2220a5e485a9SVille Syrjälä * Theory on interrupt generation, based on empirical evidence: 2221a5e485a9SVille Syrjälä * 2222a5e485a9SVille Syrjälä * x = ((VLV_IIR & VLV_IER) || 2223a5e485a9SVille Syrjälä * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2224a5e485a9SVille Syrjälä * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2225a5e485a9SVille Syrjälä * 2226a5e485a9SVille Syrjälä * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2227a5e485a9SVille Syrjälä * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2228a5e485a9SVille Syrjälä * guarantee the CPU interrupt will be raised again even if we 2229a5e485a9SVille Syrjälä * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2230a5e485a9SVille Syrjälä * bits this time around. 
2231a5e485a9SVille Syrjälä */ 223243f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 2233a5e485a9SVille Syrjälä ier = I915_READ(VLV_IER); 2234a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, 0); 223543f328d7SVille Syrjälä 2236e30e251aSVille Syrjälä gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 223727b6c122SOscar Mateo 223827b6c122SOscar Mateo if (iir & I915_DISPLAY_PORT_INTERRUPT) 22391ae3c34cSVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 224043f328d7SVille Syrjälä 224127b6c122SOscar Mateo /* Call regardless, as some status bits might not be 224227b6c122SOscar Mateo * signalled in iir */ 2243eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 224443f328d7SVille Syrjälä 2245eef57324SJerome Anand if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2246eef57324SJerome Anand I915_LPE_PIPE_B_INTERRUPT | 2247eef57324SJerome Anand I915_LPE_PIPE_C_INTERRUPT)) 2248eef57324SJerome Anand intel_lpe_audio_irq_handler(dev_priv); 2249eef57324SJerome Anand 22507ce4d1f2SVille Syrjälä /* 22517ce4d1f2SVille Syrjälä * VLV_IIR is single buffered, and reflects the level 22527ce4d1f2SVille Syrjälä * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 22537ce4d1f2SVille Syrjälä */ 22547ce4d1f2SVille Syrjälä if (iir) 22557ce4d1f2SVille Syrjälä I915_WRITE(VLV_IIR, iir); 22567ce4d1f2SVille Syrjälä 2257a5e485a9SVille Syrjälä I915_WRITE(VLV_IER, ier); 2258e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 22591ae3c34cSVille Syrjälä 2260f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2261e30e251aSVille Syrjälä 22621ae3c34cSVille Syrjälä if (hotplug_status) 226391d14251STvrtko Ursulin i9xx_hpd_irq_handler(dev_priv, hotplug_status); 22642ecb8ca4SVille Syrjälä 226591d14251STvrtko Ursulin valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2266579de73bSChris Wilson } while (0); 22673278f67fSVille Syrjälä 22681f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 22691f814dacSImre Deak 227043f328d7SVille Syrjälä return ret; 227143f328d7SVille Syrjälä } 227243f328d7SVille Syrjälä 227391d14251STvrtko Ursulin static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 227491d14251STvrtko Ursulin u32 hotplug_trigger, 227540e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2276776ad806SJesse Barnes { 227742db67d6SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2278776ad806SJesse Barnes 22796a39d7c9SJani Nikula /* 22806a39d7c9SJani Nikula * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 22816a39d7c9SJani Nikula * unless we touch the hotplug register, even if hotplug_trigger is 22826a39d7c9SJani Nikula * zero. Not acking leads to "The master control interrupt lied (SDE)!" 22836a39d7c9SJani Nikula * errors. 
22846a39d7c9SJani Nikula */ 228513cf5504SDave Airlie dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 22866a39d7c9SJani Nikula if (!hotplug_trigger) { 22876a39d7c9SJani Nikula u32 mask = PORTA_HOTPLUG_STATUS_MASK | 22886a39d7c9SJani Nikula PORTD_HOTPLUG_STATUS_MASK | 22896a39d7c9SJani Nikula PORTC_HOTPLUG_STATUS_MASK | 22906a39d7c9SJani Nikula PORTB_HOTPLUG_STATUS_MASK; 22916a39d7c9SJani Nikula dig_hotplug_reg &= ~mask; 22926a39d7c9SJani Nikula } 22936a39d7c9SJani Nikula 229413cf5504SDave Airlie I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 22956a39d7c9SJani Nikula if (!hotplug_trigger) 22966a39d7c9SJani Nikula return; 229713cf5504SDave Airlie 2298cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 229940e56410SVille Syrjälä dig_hotplug_reg, hpd, 2300fd63e2a9SImre Deak pch_port_hotplug_long_detect); 230140e56410SVille Syrjälä 230291d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2303aaf5ec2eSSonika Jindal } 230491d131d2SDaniel Vetter 230591d14251STvrtko Ursulin static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 230640e56410SVille Syrjälä { 230740e56410SVille Syrjälä int pipe; 230840e56410SVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 230940e56410SVille Syrjälä 231091d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 231140e56410SVille Syrjälä 2312cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK) { 2313cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2314776ad806SJesse Barnes SDE_AUDIO_POWER_SHIFT); 2315cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2316cfc33bf7SVille Syrjälä port_name(port)); 2317cfc33bf7SVille Syrjälä } 2318776ad806SJesse Barnes 2319ce99c256SDaniel Vetter if (pch_iir & SDE_AUX_MASK) 232091d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2321ce99c256SDaniel Vetter 2322776ad806SJesse Barnes if (pch_iir & SDE_GMBUS) 232391d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 2324776ad806SJesse Barnes 2325776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_HDCP_MASK) 2326776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2327776ad806SJesse Barnes 2328776ad806SJesse Barnes if (pch_iir & SDE_AUDIO_TRANS_MASK) 2329776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2330776ad806SJesse Barnes 2331776ad806SJesse Barnes if (pch_iir & SDE_POISON) 2332776ad806SJesse Barnes DRM_ERROR("PCH poison interrupt\n"); 2333776ad806SJesse Barnes 23349db4a9c7SJesse Barnes if (pch_iir & SDE_FDI_MASK) 2335055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 23369db4a9c7SJesse Barnes DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 23379db4a9c7SJesse Barnes pipe_name(pipe), 23389db4a9c7SJesse Barnes I915_READ(FDI_RX_IIR(pipe))); 2339776ad806SJesse Barnes 2340776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2341776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2342776ad806SJesse Barnes 2343776ad806SJesse Barnes if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2344776ad806SJesse Barnes DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2345776ad806SJesse Barnes 2346776ad806SJesse Barnes if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2347a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 23488664281bSPaulo Zanoni 23498664281bSPaulo Zanoni if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2350a2196033SMatthias Kaehlcke intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 
23518664281bSPaulo Zanoni } 23528664281bSPaulo Zanoni 235391d14251STvrtko Ursulin static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 23548664281bSPaulo Zanoni { 23558664281bSPaulo Zanoni u32 err_int = I915_READ(GEN7_ERR_INT); 23565a69b89fSDaniel Vetter enum pipe pipe; 23578664281bSPaulo Zanoni 2358de032bf4SPaulo Zanoni if (err_int & ERR_INT_POISON) 2359de032bf4SPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2360de032bf4SPaulo Zanoni 2361055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 23621f7247c0SDaniel Vetter if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 23631f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 23648664281bSPaulo Zanoni 23655a69b89fSDaniel Vetter if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 236691d14251STvrtko Ursulin if (IS_IVYBRIDGE(dev_priv)) 236791d14251STvrtko Ursulin ivb_pipe_crc_irq_handler(dev_priv, pipe); 23685a69b89fSDaniel Vetter else 236991d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 23705a69b89fSDaniel Vetter } 23715a69b89fSDaniel Vetter } 23728bf1e9f1SShuang He 23738664281bSPaulo Zanoni I915_WRITE(GEN7_ERR_INT, err_int); 23748664281bSPaulo Zanoni } 23758664281bSPaulo Zanoni 237691d14251STvrtko Ursulin static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 23778664281bSPaulo Zanoni { 23788664281bSPaulo Zanoni u32 serr_int = I915_READ(SERR_INT); 237945c1cd87SMika Kahola enum pipe pipe; 23808664281bSPaulo Zanoni 2381de032bf4SPaulo Zanoni if (serr_int & SERR_INT_POISON) 2382de032bf4SPaulo Zanoni DRM_ERROR("PCH poison interrupt\n"); 2383de032bf4SPaulo Zanoni 238445c1cd87SMika Kahola for_each_pipe(dev_priv, pipe) 238545c1cd87SMika Kahola if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 238645c1cd87SMika Kahola intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 23878664281bSPaulo Zanoni 23888664281bSPaulo Zanoni I915_WRITE(SERR_INT, serr_int); 2389776ad806SJesse Barnes } 2390776ad806SJesse Barnes 239191d14251STvrtko Ursulin static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 239223e81d69SAdam Jackson { 239323e81d69SAdam Jackson int pipe; 23946dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2395aaf5ec2eSSonika Jindal 239691d14251STvrtko Ursulin ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 239791d131d2SDaniel Vetter 2398cfc33bf7SVille Syrjälä if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2399cfc33bf7SVille Syrjälä int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 240023e81d69SAdam Jackson SDE_AUDIO_POWER_SHIFT_CPT); 2401cfc33bf7SVille Syrjälä DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2402cfc33bf7SVille Syrjälä port_name(port)); 2403cfc33bf7SVille Syrjälä } 240423e81d69SAdam Jackson 240523e81d69SAdam Jackson if (pch_iir & SDE_AUX_MASK_CPT) 240691d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 240723e81d69SAdam Jackson 240823e81d69SAdam Jackson if (pch_iir & SDE_GMBUS_CPT) 240991d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 241023e81d69SAdam Jackson 241123e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 241223e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 241323e81d69SAdam Jackson 241423e81d69SAdam Jackson if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 241523e81d69SAdam Jackson DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 241623e81d69SAdam Jackson 241723e81d69SAdam Jackson if (pch_iir & SDE_FDI_MASK_CPT) 2418055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 241923e81d69SAdam Jackson DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 242023e81d69SAdam Jackson pipe_name(pipe), 
242123e81d69SAdam Jackson I915_READ(FDI_RX_IIR(pipe))); 24228664281bSPaulo Zanoni 24238664281bSPaulo Zanoni if (pch_iir & SDE_ERROR_CPT) 242491d14251STvrtko Ursulin cpt_serr_int_handler(dev_priv); 242523e81d69SAdam Jackson } 242623e81d69SAdam Jackson 242731604222SAnusha Srivatsa static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 242831604222SAnusha Srivatsa { 242931604222SAnusha Srivatsa u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 243031604222SAnusha Srivatsa u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 243131604222SAnusha Srivatsa u32 pin_mask = 0, long_mask = 0; 243231604222SAnusha Srivatsa 243331604222SAnusha Srivatsa if (ddi_hotplug_trigger) { 243431604222SAnusha Srivatsa u32 dig_hotplug_reg; 243531604222SAnusha Srivatsa 243631604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 243731604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 243831604222SAnusha Srivatsa 243931604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 244031604222SAnusha Srivatsa ddi_hotplug_trigger, 244131604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 244231604222SAnusha Srivatsa icp_ddi_port_hotplug_long_detect); 244331604222SAnusha Srivatsa } 244431604222SAnusha Srivatsa 244531604222SAnusha Srivatsa if (tc_hotplug_trigger) { 244631604222SAnusha Srivatsa u32 dig_hotplug_reg; 244731604222SAnusha Srivatsa 244831604222SAnusha Srivatsa dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 244931604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 245031604222SAnusha Srivatsa 245131604222SAnusha Srivatsa intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 245231604222SAnusha Srivatsa tc_hotplug_trigger, 245331604222SAnusha Srivatsa dig_hotplug_reg, hpd_icp, 245431604222SAnusha Srivatsa icp_tc_port_hotplug_long_detect); 245531604222SAnusha Srivatsa } 245631604222SAnusha Srivatsa 245731604222SAnusha Srivatsa if (pin_mask) 245831604222SAnusha Srivatsa intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 245931604222SAnusha Srivatsa 246031604222SAnusha Srivatsa if (pch_iir & SDE_GMBUS_ICP) 246131604222SAnusha Srivatsa gmbus_irq_handler(dev_priv); 246231604222SAnusha Srivatsa } 246331604222SAnusha Srivatsa 246491d14251STvrtko Ursulin static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 24656dbf30ceSVille Syrjälä { 24666dbf30ceSVille Syrjälä u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 24676dbf30ceSVille Syrjälä ~SDE_PORTE_HOTPLUG_SPT; 24686dbf30ceSVille Syrjälä u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 24696dbf30ceSVille Syrjälä u32 pin_mask = 0, long_mask = 0; 24706dbf30ceSVille Syrjälä 24716dbf30ceSVille Syrjälä if (hotplug_trigger) { 24726dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24736dbf30ceSVille Syrjälä 24746dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 24756dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 24766dbf30ceSVille Syrjälä 2477cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2478cf53902fSRodrigo Vivi hotplug_trigger, dig_hotplug_reg, hpd_spt, 247974c0b395SVille Syrjälä spt_port_hotplug_long_detect); 24806dbf30ceSVille Syrjälä } 24816dbf30ceSVille Syrjälä 24826dbf30ceSVille Syrjälä if (hotplug2_trigger) { 24836dbf30ceSVille Syrjälä u32 dig_hotplug_reg; 24846dbf30ceSVille Syrjälä 24856dbf30ceSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 24866dbf30ceSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 24876dbf30ceSVille Syrjälä 2488cf53902fSRodrigo Vivi 
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2489cf53902fSRodrigo Vivi hotplug2_trigger, dig_hotplug_reg, hpd_spt, 24906dbf30ceSVille Syrjälä spt_port_hotplug2_long_detect); 24916dbf30ceSVille Syrjälä } 24926dbf30ceSVille Syrjälä 24936dbf30ceSVille Syrjälä if (pin_mask) 249491d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 24956dbf30ceSVille Syrjälä 24966dbf30ceSVille Syrjälä if (pch_iir & SDE_GMBUS_CPT) 249791d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 24986dbf30ceSVille Syrjälä } 24996dbf30ceSVille Syrjälä 250091d14251STvrtko Ursulin static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 250191d14251STvrtko Ursulin u32 hotplug_trigger, 250240e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2503c008bc6eSPaulo Zanoni { 2504e4ce95aaSVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2505e4ce95aaSVille Syrjälä 2506e4ce95aaSVille Syrjälä dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2507e4ce95aaSVille Syrjälä I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2508e4ce95aaSVille Syrjälä 2509cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 251040e56410SVille Syrjälä dig_hotplug_reg, hpd, 2511e4ce95aaSVille Syrjälä ilk_port_hotplug_long_detect); 251240e56410SVille Syrjälä 251391d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2514e4ce95aaSVille Syrjälä } 2515c008bc6eSPaulo Zanoni 251691d14251STvrtko Ursulin static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 251791d14251STvrtko Ursulin u32 de_iir) 251840e56410SVille Syrjälä { 251940e56410SVille Syrjälä enum pipe pipe; 252040e56410SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 252140e56410SVille Syrjälä 252240e56410SVille Syrjälä if (hotplug_trigger) 252391d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 252440e56410SVille Syrjälä 2525c008bc6eSPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A) 252691d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2527c008bc6eSPaulo Zanoni 2528c008bc6eSPaulo Zanoni if (de_iir & DE_GSE) 252991d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2530c008bc6eSPaulo Zanoni 2531c008bc6eSPaulo Zanoni if (de_iir & DE_POISON) 2532c008bc6eSPaulo Zanoni DRM_ERROR("Poison interrupt\n"); 2533c008bc6eSPaulo Zanoni 2534055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2535fd3a4024SDaniel Vetter if (de_iir & DE_PIPE_VBLANK(pipe)) 2536fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2537c008bc6eSPaulo Zanoni 253840da17c2SDaniel Vetter if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 25391f7247c0SDaniel Vetter intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2540c008bc6eSPaulo Zanoni 254140da17c2SDaniel Vetter if (de_iir & DE_PIPE_CRC_DONE(pipe)) 254291d14251STvrtko Ursulin i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2543c008bc6eSPaulo Zanoni } 2544c008bc6eSPaulo Zanoni 2545c008bc6eSPaulo Zanoni /* check event from PCH */ 2546c008bc6eSPaulo Zanoni if (de_iir & DE_PCH_EVENT) { 2547c008bc6eSPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 2548c008bc6eSPaulo Zanoni 254991d14251STvrtko Ursulin if (HAS_PCH_CPT(dev_priv)) 255091d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 2551c008bc6eSPaulo Zanoni else 255291d14251STvrtko Ursulin ibx_irq_handler(dev_priv, pch_iir); 2553c008bc6eSPaulo Zanoni 2554c008bc6eSPaulo Zanoni /* should clear PCH hotplug event before clear CPU irq */ 2555c008bc6eSPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 2556c008bc6eSPaulo Zanoni } 2557c008bc6eSPaulo Zanoni 
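/* Descriptive note (added, not part of the blamed source): the PCU event checked below is only acted on for gen5 (Ironlake), where it feeds the RPS frequency-change handler. */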
255891d14251STvrtko Ursulin if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 255991d14251STvrtko Ursulin ironlake_rps_change_irq_handler(dev_priv); 2560c008bc6eSPaulo Zanoni } 2561c008bc6eSPaulo Zanoni 256291d14251STvrtko Ursulin static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 256391d14251STvrtko Ursulin u32 de_iir) 25649719fb98SPaulo Zanoni { 256507d27e20SDamien Lespiau enum pipe pipe; 256623bb4cb5SVille Syrjälä u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 256723bb4cb5SVille Syrjälä 256840e56410SVille Syrjälä if (hotplug_trigger) 256991d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 25709719fb98SPaulo Zanoni 25719719fb98SPaulo Zanoni if (de_iir & DE_ERR_INT_IVB) 257291d14251STvrtko Ursulin ivb_err_int_handler(dev_priv); 25739719fb98SPaulo Zanoni 257454fd3149SDhinakaran Pandiyan if (de_iir & DE_EDP_PSR_INT_HSW) { 257554fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 257654fd3149SDhinakaran Pandiyan 257754fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 257854fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 257954fd3149SDhinakaran Pandiyan } 2580fc340442SDaniel Vetter 25819719fb98SPaulo Zanoni if (de_iir & DE_AUX_CHANNEL_A_IVB) 258291d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 25839719fb98SPaulo Zanoni 25849719fb98SPaulo Zanoni if (de_iir & DE_GSE_IVB) 258591d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 25869719fb98SPaulo Zanoni 2587055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2588fd3a4024SDaniel Vetter if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2589fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 25909719fb98SPaulo Zanoni } 25919719fb98SPaulo Zanoni 25929719fb98SPaulo Zanoni /* check event from PCH */ 259391d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 25949719fb98SPaulo Zanoni u32 pch_iir = I915_READ(SDEIIR); 25959719fb98SPaulo Zanoni 259691d14251STvrtko Ursulin cpt_irq_handler(dev_priv, pch_iir); 25979719fb98SPaulo Zanoni 25989719fb98SPaulo Zanoni /* clear PCH hotplug event before clear CPU irq */ 25999719fb98SPaulo Zanoni I915_WRITE(SDEIIR, pch_iir); 26009719fb98SPaulo Zanoni } 26019719fb98SPaulo Zanoni } 26029719fb98SPaulo Zanoni 260372c90f62SOscar Mateo /* 260472c90f62SOscar Mateo * To handle irqs with the minimum potential races with fresh interrupts, we: 260572c90f62SOscar Mateo * 1 - Disable Master Interrupt Control. 260672c90f62SOscar Mateo * 2 - Find the source(s) of the interrupt. 260772c90f62SOscar Mateo * 3 - Clear the Interrupt Identity bits (IIR). 260872c90f62SOscar Mateo * 4 - Process the interrupt(s) that had bits set in the IIRs. 260972c90f62SOscar Mateo * 5 - Re-enable Master Interrupt Control. 
261072c90f62SOscar Mateo */ 2611f1af8fc1SPaulo Zanoni static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2612b1f14ad0SJesse Barnes { 261345a83f84SDaniel Vetter struct drm_device *dev = arg; 2614fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 2615f1af8fc1SPaulo Zanoni u32 de_iir, gt_iir, de_ier, sde_ier = 0; 26160e43406bSChris Wilson irqreturn_t ret = IRQ_NONE; 2617b1f14ad0SJesse Barnes 26182dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 26192dd2a883SImre Deak return IRQ_NONE; 26202dd2a883SImre Deak 26211f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26221f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 26231f814dacSImre Deak 2624b1f14ad0SJesse Barnes /* disable master interrupt before clearing iir */ 2625b1f14ad0SJesse Barnes de_ier = I915_READ(DEIER); 2626b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 26270e43406bSChris Wilson 262844498aeaSPaulo Zanoni /* Disable south interrupts. We'll only write to SDEIIR once, so further 262944498aeaSPaulo Zanoni * interrupts will be stored on its back queue, and then we'll be 263044498aeaSPaulo Zanoni * able to process them after we restore SDEIER (as soon as we restore 263144498aeaSPaulo Zanoni * it, we'll get an interrupt if SDEIIR still has something to process 263244498aeaSPaulo Zanoni * due to its back queue). */ 263391d14251STvrtko Ursulin if (!HAS_PCH_NOP(dev_priv)) { 263444498aeaSPaulo Zanoni sde_ier = I915_READ(SDEIER); 263544498aeaSPaulo Zanoni I915_WRITE(SDEIER, 0); 2636ab5c608bSBen Widawsky } 263744498aeaSPaulo Zanoni 263872c90f62SOscar Mateo /* Find, clear, then process each source of interrupt */ 263972c90f62SOscar Mateo 26400e43406bSChris Wilson gt_iir = I915_READ(GTIIR); 26410e43406bSChris Wilson if (gt_iir) { 264272c90f62SOscar Mateo I915_WRITE(GTIIR, gt_iir); 264372c90f62SOscar Mateo ret = IRQ_HANDLED; 264491d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 2645261e40b8SVille Syrjälä snb_gt_irq_handler(dev_priv, gt_iir); 2646d8fc8a47SPaulo Zanoni else 2647261e40b8SVille Syrjälä ilk_gt_irq_handler(dev_priv, gt_iir); 26480e43406bSChris Wilson } 2649b1f14ad0SJesse Barnes 2650b1f14ad0SJesse Barnes de_iir = I915_READ(DEIIR); 26510e43406bSChris Wilson if (de_iir) { 265272c90f62SOscar Mateo I915_WRITE(DEIIR, de_iir); 265372c90f62SOscar Mateo ret = IRQ_HANDLED; 265491d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) 265591d14251STvrtko Ursulin ivb_display_irq_handler(dev_priv, de_iir); 2656f1af8fc1SPaulo Zanoni else 265791d14251STvrtko Ursulin ilk_display_irq_handler(dev_priv, de_iir); 26580e43406bSChris Wilson } 26590e43406bSChris Wilson 266091d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 2661f1af8fc1SPaulo Zanoni u32 pm_iir = I915_READ(GEN6_PMIIR); 26620e43406bSChris Wilson if (pm_iir) { 2663b1f14ad0SJesse Barnes I915_WRITE(GEN6_PMIIR, pm_iir); 26640e43406bSChris Wilson ret = IRQ_HANDLED; 266572c90f62SOscar Mateo gen6_rps_irq_handler(dev_priv, pm_iir); 26660e43406bSChris Wilson } 2667f1af8fc1SPaulo Zanoni } 2668b1f14ad0SJesse Barnes 2669b1f14ad0SJesse Barnes I915_WRITE(DEIER, de_ier); 267074093f3eSChris Wilson if (!HAS_PCH_NOP(dev_priv)) 267144498aeaSPaulo Zanoni I915_WRITE(SDEIER, sde_ier); 2672b1f14ad0SJesse Barnes 26731f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 26741f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 26751f814dacSImre Deak 2676b1f14ad0SJesse Barnes return ret; 2677b1f14ad0SJesse Barnes } 2678b1f14ad0SJesse Barnes 267991d14251STvrtko 
Ursulin static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 268091d14251STvrtko Ursulin u32 hotplug_trigger, 268140e56410SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 2682d04a492dSShashank Sharma { 2683cebd87a0SVille Syrjälä u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2684d04a492dSShashank Sharma 2685a52bb15bSVille Syrjälä dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2686a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2687d04a492dSShashank Sharma 2688cf53902fSRodrigo Vivi intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 268940e56410SVille Syrjälä dig_hotplug_reg, hpd, 2690cebd87a0SVille Syrjälä bxt_port_hotplug_long_detect); 269140e56410SVille Syrjälä 269291d14251STvrtko Ursulin intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2693d04a492dSShashank Sharma } 2694d04a492dSShashank Sharma 2695121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2696121e758eSDhinakaran Pandiyan { 2697121e758eSDhinakaran Pandiyan u32 pin_mask = 0, long_mask = 0; 2698b796b971SDhinakaran Pandiyan u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2699b796b971SDhinakaran Pandiyan u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2700121e758eSDhinakaran Pandiyan 2701121e758eSDhinakaran Pandiyan if (trigger_tc) { 2702b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2703b796b971SDhinakaran Pandiyan 2704121e758eSDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2705121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2706121e758eSDhinakaran Pandiyan 2707121e758eSDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2708b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2709121e758eSDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2710121e758eSDhinakaran Pandiyan } 2711b796b971SDhinakaran Pandiyan 2712b796b971SDhinakaran Pandiyan if (trigger_tbt) { 2713b796b971SDhinakaran Pandiyan u32 dig_hotplug_reg; 2714b796b971SDhinakaran Pandiyan 2715b796b971SDhinakaran Pandiyan dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2716b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2717b796b971SDhinakaran Pandiyan 2718b796b971SDhinakaran Pandiyan intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2719b796b971SDhinakaran Pandiyan dig_hotplug_reg, hpd_gen11, 2720b796b971SDhinakaran Pandiyan gen11_port_hotplug_long_detect); 2721b796b971SDhinakaran Pandiyan } 2722b796b971SDhinakaran Pandiyan 2723b796b971SDhinakaran Pandiyan if (pin_mask) 2724b796b971SDhinakaran Pandiyan intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2725b796b971SDhinakaran Pandiyan else 2726b796b971SDhinakaran Pandiyan DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2727121e758eSDhinakaran Pandiyan } 2728121e758eSDhinakaran Pandiyan 2729f11a0f46STvrtko Ursulin static irqreturn_t 2730f11a0f46STvrtko Ursulin gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2731abd58f01SBen Widawsky { 2732abd58f01SBen Widawsky irqreturn_t ret = IRQ_NONE; 2733f11a0f46STvrtko Ursulin u32 iir; 2734c42664ccSDaniel Vetter enum pipe pipe; 273588e04703SJesse Barnes 2736abd58f01SBen Widawsky if (master_ctl & GEN8_DE_MISC_IRQ) { 2737e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_MISC_IIR); 2738e32192e1STvrtko Ursulin if (iir) { 2739e04f7eceSVille Syrjälä bool found = false; 2740e04f7eceSVille Syrjälä 2741e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_MISC_IIR, iir); 2742abd58f01SBen Widawsky ret = IRQ_HANDLED; 
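/* Descriptive note (added, not part of the blamed source): only the GSE (opregion/ASLE) and eDP PSR bits are handled in DE_MISC below; any other bit falls through to the "Unexpected DE Misc interrupt" error. */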
2743e04f7eceSVille Syrjälä 2744e04f7eceSVille Syrjälä if (iir & GEN8_DE_MISC_GSE) { 274591d14251STvrtko Ursulin intel_opregion_asle_intr(dev_priv); 2746e04f7eceSVille Syrjälä found = true; 2747e04f7eceSVille Syrjälä } 2748e04f7eceSVille Syrjälä 2749e04f7eceSVille Syrjälä if (iir & GEN8_DE_EDP_PSR) { 275054fd3149SDhinakaran Pandiyan u32 psr_iir = I915_READ(EDP_PSR_IIR); 275154fd3149SDhinakaran Pandiyan 275254fd3149SDhinakaran Pandiyan intel_psr_irq_handler(dev_priv, psr_iir); 275354fd3149SDhinakaran Pandiyan I915_WRITE(EDP_PSR_IIR, psr_iir); 2754e04f7eceSVille Syrjälä found = true; 2755e04f7eceSVille Syrjälä } 2756e04f7eceSVille Syrjälä 2757e04f7eceSVille Syrjälä if (!found) 275838cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Misc interrupt\n"); 2759abd58f01SBen Widawsky } 276038cc46d7SOscar Mateo else 276138cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2762abd58f01SBen Widawsky } 2763abd58f01SBen Widawsky 2764121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2765121e758eSDhinakaran Pandiyan iir = I915_READ(GEN11_DE_HPD_IIR); 2766121e758eSDhinakaran Pandiyan if (iir) { 2767121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IIR, iir); 2768121e758eSDhinakaran Pandiyan ret = IRQ_HANDLED; 2769121e758eSDhinakaran Pandiyan gen11_hpd_irq_handler(dev_priv, iir); 2770121e758eSDhinakaran Pandiyan } else { 2771121e758eSDhinakaran Pandiyan DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2772121e758eSDhinakaran Pandiyan } 2773121e758eSDhinakaran Pandiyan } 2774121e758eSDhinakaran Pandiyan 27756d766f02SDaniel Vetter if (master_ctl & GEN8_DE_PORT_IRQ) { 2776e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PORT_IIR); 2777e32192e1STvrtko Ursulin if (iir) { 2778e32192e1STvrtko Ursulin u32 tmp_mask; 2779d04a492dSShashank Sharma bool found = false; 2780cebd87a0SVille Syrjälä 2781e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PORT_IIR, iir); 27826d766f02SDaniel Vetter ret = IRQ_HANDLED; 278388e04703SJesse Barnes 2784e32192e1STvrtko Ursulin tmp_mask = GEN8_AUX_CHANNEL_A; 2785bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2786e32192e1STvrtko Ursulin tmp_mask |= GEN9_AUX_CHANNEL_B | 2787e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_C | 2788e32192e1STvrtko Ursulin GEN9_AUX_CHANNEL_D; 2789e32192e1STvrtko Ursulin 2790bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 2791bb187e93SJames Ausmus tmp_mask |= ICL_AUX_CHANNEL_E; 2792bb187e93SJames Ausmus 27939bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || 27949bb635d9SDhinakaran Pandiyan INTEL_GEN(dev_priv) >= 11) 2795a324fcacSRodrigo Vivi tmp_mask |= CNL_AUX_CHANNEL_F; 2796a324fcacSRodrigo Vivi 2797e32192e1STvrtko Ursulin if (iir & tmp_mask) { 279891d14251STvrtko Ursulin dp_aux_irq_handler(dev_priv); 2799d04a492dSShashank Sharma found = true; 2800d04a492dSShashank Sharma } 2801d04a492dSShashank Sharma 2802cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) { 2803e32192e1STvrtko Ursulin tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2804e32192e1STvrtko Ursulin if (tmp_mask) { 280591d14251STvrtko Ursulin bxt_hpd_irq_handler(dev_priv, tmp_mask, 280691d14251STvrtko Ursulin hpd_bxt); 2807d04a492dSShashank Sharma found = true; 2808d04a492dSShashank Sharma } 2809e32192e1STvrtko Ursulin } else if (IS_BROADWELL(dev_priv)) { 2810e32192e1STvrtko Ursulin tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2811e32192e1STvrtko Ursulin if (tmp_mask) { 281291d14251STvrtko Ursulin ilk_hpd_irq_handler(dev_priv, 281391d14251STvrtko Ursulin tmp_mask, hpd_bdw); 2814e32192e1STvrtko 
Ursulin found = true; 2815e32192e1STvrtko Ursulin } 2816e32192e1STvrtko Ursulin } 2817d04a492dSShashank Sharma 2818cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 281991d14251STvrtko Ursulin gmbus_irq_handler(dev_priv); 28209e63743eSShashank Sharma found = true; 28219e63743eSShashank Sharma } 28229e63743eSShashank Sharma 2823d04a492dSShashank Sharma if (!found) 282438cc46d7SOscar Mateo DRM_ERROR("Unexpected DE Port interrupt\n"); 28256d766f02SDaniel Vetter } 282638cc46d7SOscar Mateo else 282738cc46d7SOscar Mateo DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 28286d766f02SDaniel Vetter } 28296d766f02SDaniel Vetter 2830055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) { 2831fd3a4024SDaniel Vetter u32 fault_errors; 2832abd58f01SBen Widawsky 2833c42664ccSDaniel Vetter if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2834c42664ccSDaniel Vetter continue; 2835c42664ccSDaniel Vetter 2836e32192e1STvrtko Ursulin iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2837e32192e1STvrtko Ursulin if (!iir) { 2838e32192e1STvrtko Ursulin DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2839e32192e1STvrtko Ursulin continue; 2840e32192e1STvrtko Ursulin } 2841770de83dSDamien Lespiau 2842e32192e1STvrtko Ursulin ret = IRQ_HANDLED; 2843e32192e1STvrtko Ursulin I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2844e32192e1STvrtko Ursulin 2845fd3a4024SDaniel Vetter if (iir & GEN8_PIPE_VBLANK) 2846fd3a4024SDaniel Vetter drm_handle_vblank(&dev_priv->drm, pipe); 2847abd58f01SBen Widawsky 2848e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 284991d14251STvrtko Ursulin hsw_pipe_crc_irq_handler(dev_priv, pipe); 28500fbe7870SDaniel Vetter 2851e32192e1STvrtko Ursulin if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2852e32192e1STvrtko Ursulin intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 285338d83c96SDaniel Vetter 2854e32192e1STvrtko Ursulin fault_errors = iir; 2855bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) 2856e32192e1STvrtko Ursulin fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2857770de83dSDamien Lespiau else 2858e32192e1STvrtko Ursulin fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2859770de83dSDamien Lespiau 2860770de83dSDamien Lespiau if (fault_errors) 28611353ec38STvrtko Ursulin DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 286230100f2bSDaniel Vetter pipe_name(pipe), 2863e32192e1STvrtko Ursulin fault_errors); 2864abd58f01SBen Widawsky } 2865abd58f01SBen Widawsky 286691d14251STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2867266ea3d9SShashank Sharma master_ctl & GEN8_DE_PCH_IRQ) { 286892d03a80SDaniel Vetter /* 286992d03a80SDaniel Vetter * FIXME(BDW): Assume for now that the new interrupt handling 287092d03a80SDaniel Vetter * scheme also closed the SDE interrupt handling race we've seen 287192d03a80SDaniel Vetter * on older pch-split platforms. But this needs testing. 
287292d03a80SDaniel Vetter */ 2873e32192e1STvrtko Ursulin iir = I915_READ(SDEIIR); 2874e32192e1STvrtko Ursulin if (iir) { 2875e32192e1STvrtko Ursulin I915_WRITE(SDEIIR, iir); 287692d03a80SDaniel Vetter ret = IRQ_HANDLED; 28776dbf30ceSVille Syrjälä 287831604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 287931604222SAnusha Srivatsa icp_irq_handler(dev_priv, iir); 288031604222SAnusha Srivatsa else if (HAS_PCH_SPT(dev_priv) || 288131604222SAnusha Srivatsa HAS_PCH_KBP(dev_priv) || 28827b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 288391d14251STvrtko Ursulin spt_irq_handler(dev_priv, iir); 28846dbf30ceSVille Syrjälä else 288591d14251STvrtko Ursulin cpt_irq_handler(dev_priv, iir); 28862dfb0b81SJani Nikula } else { 28872dfb0b81SJani Nikula /* 28882dfb0b81SJani Nikula * Like on previous PCH there seems to be something 28892dfb0b81SJani Nikula * fishy going on with forwarding PCH interrupts. 28902dfb0b81SJani Nikula */ 28912dfb0b81SJani Nikula DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 28922dfb0b81SJani Nikula } 289392d03a80SDaniel Vetter } 289492d03a80SDaniel Vetter 2895f11a0f46STvrtko Ursulin return ret; 2896f11a0f46STvrtko Ursulin } 2897f11a0f46STvrtko Ursulin 2898f11a0f46STvrtko Ursulin static irqreturn_t gen8_irq_handler(int irq, void *arg) 2899f11a0f46STvrtko Ursulin { 2900f0fd96f5SChris Wilson struct drm_i915_private *dev_priv = to_i915(arg); 2901f11a0f46STvrtko Ursulin u32 master_ctl; 2902f0fd96f5SChris Wilson u32 gt_iir[4]; 2903f11a0f46STvrtko Ursulin 2904f11a0f46STvrtko Ursulin if (!intel_irqs_enabled(dev_priv)) 2905f11a0f46STvrtko Ursulin return IRQ_NONE; 2906f11a0f46STvrtko Ursulin 2907f11a0f46STvrtko Ursulin master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2908f11a0f46STvrtko Ursulin master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2909f11a0f46STvrtko Ursulin if (!master_ctl) 2910f11a0f46STvrtko Ursulin return IRQ_NONE; 2911f11a0f46STvrtko Ursulin 2912f11a0f46STvrtko Ursulin I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2913f11a0f46STvrtko Ursulin 2914f11a0f46STvrtko Ursulin /* Find, clear, then process each source of interrupt */ 291555ef72f2SChris Wilson gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2916f0fd96f5SChris Wilson 2917f0fd96f5SChris Wilson /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2918f0fd96f5SChris Wilson if (master_ctl & ~GEN8_GT_IRQS) { 2919f0fd96f5SChris Wilson disable_rpm_wakeref_asserts(dev_priv); 292055ef72f2SChris Wilson gen8_de_irq_handler(dev_priv, master_ctl); 2921f0fd96f5SChris Wilson enable_rpm_wakeref_asserts(dev_priv); 2922f0fd96f5SChris Wilson } 2923f11a0f46STvrtko Ursulin 2924cb0d205eSChris Wilson I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2925abd58f01SBen Widawsky 2926f0fd96f5SChris Wilson gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 29271f814dacSImre Deak 292855ef72f2SChris Wilson return IRQ_HANDLED; 2929abd58f01SBen Widawsky } 2930abd58f01SBen Widawsky 293136703e79SChris Wilson struct wedge_me { 293236703e79SChris Wilson struct delayed_work work; 293336703e79SChris Wilson struct drm_i915_private *i915; 293436703e79SChris Wilson const char *name; 293536703e79SChris Wilson }; 293636703e79SChris Wilson 293736703e79SChris Wilson static void wedge_me(struct work_struct *work) 293836703e79SChris Wilson { 293936703e79SChris Wilson struct wedge_me *w = container_of(work, typeof(*w), work.work); 294036703e79SChris Wilson 294136703e79SChris Wilson dev_err(w->i915->drm.dev, 294236703e79SChris Wilson "%s timed out, cancelling all in-flight rendering.\n", 294336703e79SChris Wilson w->name); 294436703e79SChris Wilson 
i915_gem_set_wedged(w->i915); 294536703e79SChris Wilson } 294636703e79SChris Wilson 294736703e79SChris Wilson static void __init_wedge(struct wedge_me *w, 294836703e79SChris Wilson struct drm_i915_private *i915, 294936703e79SChris Wilson long timeout, 295036703e79SChris Wilson const char *name) 295136703e79SChris Wilson { 295236703e79SChris Wilson w->i915 = i915; 295336703e79SChris Wilson w->name = name; 295436703e79SChris Wilson 295536703e79SChris Wilson INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 295636703e79SChris Wilson schedule_delayed_work(&w->work, timeout); 295736703e79SChris Wilson } 295836703e79SChris Wilson 295936703e79SChris Wilson static void __fini_wedge(struct wedge_me *w) 296036703e79SChris Wilson { 296136703e79SChris Wilson cancel_delayed_work_sync(&w->work); 296236703e79SChris Wilson destroy_delayed_work_on_stack(&w->work); 296336703e79SChris Wilson w->i915 = NULL; 296436703e79SChris Wilson } 296536703e79SChris Wilson 296636703e79SChris Wilson #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 296736703e79SChris Wilson for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 296836703e79SChris Wilson (W)->i915; \ 296936703e79SChris Wilson __fini_wedge((W))) 297036703e79SChris Wilson 297151951ae7SMika Kuoppala static u32 2972f744dbc2SMika Kuoppala gen11_gt_engine_identity(struct drm_i915_private * const i915, 297351951ae7SMika Kuoppala const unsigned int bank, const unsigned int bit) 297451951ae7SMika Kuoppala { 297551951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 297651951ae7SMika Kuoppala u32 timeout_ts; 297751951ae7SMika Kuoppala u32 ident; 297851951ae7SMika Kuoppala 297996606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 298096606f3bSOscar Mateo 298151951ae7SMika Kuoppala raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 298251951ae7SMika Kuoppala 298351951ae7SMika Kuoppala /* 298451951ae7SMika Kuoppala * NB: Specs do not specify how long to spin wait, 298551951ae7SMika Kuoppala * so we do ~100us as an educated guess. 
298651951ae7SMika Kuoppala */ 298751951ae7SMika Kuoppala timeout_ts = (local_clock() >> 10) + 100; 298851951ae7SMika Kuoppala do { 298951951ae7SMika Kuoppala ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 299051951ae7SMika Kuoppala } while (!(ident & GEN11_INTR_DATA_VALID) && 299151951ae7SMika Kuoppala !time_after32(local_clock() >> 10, timeout_ts)); 299251951ae7SMika Kuoppala 299351951ae7SMika Kuoppala if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 299451951ae7SMika Kuoppala DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 299551951ae7SMika Kuoppala bank, bit, ident); 299651951ae7SMika Kuoppala return 0; 299751951ae7SMika Kuoppala } 299851951ae7SMika Kuoppala 299951951ae7SMika Kuoppala raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 300051951ae7SMika Kuoppala GEN11_INTR_DATA_VALID); 300151951ae7SMika Kuoppala 3002f744dbc2SMika Kuoppala return ident; 3003f744dbc2SMika Kuoppala } 3004f744dbc2SMika Kuoppala 3005f744dbc2SMika Kuoppala static void 3006f744dbc2SMika Kuoppala gen11_other_irq_handler(struct drm_i915_private * const i915, 3007f744dbc2SMika Kuoppala const u8 instance, const u16 iir) 3008f744dbc2SMika Kuoppala { 3009d02b98b8SOscar Mateo if (instance == OTHER_GTPM_INSTANCE) 3010d02b98b8SOscar Mateo return gen6_rps_irq_handler(i915, iir); 3011d02b98b8SOscar Mateo 3012f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3013f744dbc2SMika Kuoppala instance, iir); 3014f744dbc2SMika Kuoppala } 3015f744dbc2SMika Kuoppala 3016f744dbc2SMika Kuoppala static void 3017f744dbc2SMika Kuoppala gen11_engine_irq_handler(struct drm_i915_private * const i915, 3018f744dbc2SMika Kuoppala const u8 class, const u8 instance, const u16 iir) 3019f744dbc2SMika Kuoppala { 3020f744dbc2SMika Kuoppala struct intel_engine_cs *engine; 3021f744dbc2SMika Kuoppala 3022f744dbc2SMika Kuoppala if (instance <= MAX_ENGINE_INSTANCE) 3023f744dbc2SMika Kuoppala engine = i915->engine_class[class][instance]; 3024f744dbc2SMika Kuoppala else 3025f744dbc2SMika Kuoppala engine = NULL; 3026f744dbc2SMika Kuoppala 3027f744dbc2SMika Kuoppala if (likely(engine)) 3028f744dbc2SMika Kuoppala return gen8_cs_irq_handler(engine, iir); 3029f744dbc2SMika Kuoppala 3030f744dbc2SMika Kuoppala WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3031f744dbc2SMika Kuoppala class, instance); 3032f744dbc2SMika Kuoppala } 3033f744dbc2SMika Kuoppala 3034f744dbc2SMika Kuoppala static void 3035f744dbc2SMika Kuoppala gen11_gt_identity_handler(struct drm_i915_private * const i915, 3036f744dbc2SMika Kuoppala const u32 identity) 3037f744dbc2SMika Kuoppala { 3038f744dbc2SMika Kuoppala const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3039f744dbc2SMika Kuoppala const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3040f744dbc2SMika Kuoppala const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3041f744dbc2SMika Kuoppala 3042f744dbc2SMika Kuoppala if (unlikely(!intr)) 3043f744dbc2SMika Kuoppala return; 3044f744dbc2SMika Kuoppala 3045f744dbc2SMika Kuoppala if (class <= COPY_ENGINE_CLASS) 3046f744dbc2SMika Kuoppala return gen11_engine_irq_handler(i915, class, instance, intr); 3047f744dbc2SMika Kuoppala 3048f744dbc2SMika Kuoppala if (class == OTHER_CLASS) 3049f744dbc2SMika Kuoppala return gen11_other_irq_handler(i915, instance, intr); 3050f744dbc2SMika Kuoppala 3051f744dbc2SMika Kuoppala WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3052f744dbc2SMika Kuoppala class, instance, intr); 305351951ae7SMika Kuoppala } 305451951ae7SMika Kuoppala 305551951ae7SMika 
Kuoppala static void 305696606f3bSOscar Mateo gen11_gt_bank_handler(struct drm_i915_private * const i915, 305796606f3bSOscar Mateo const unsigned int bank) 305851951ae7SMika Kuoppala { 305951951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 306051951ae7SMika Kuoppala unsigned long intr_dw; 306151951ae7SMika Kuoppala unsigned int bit; 306251951ae7SMika Kuoppala 306396606f3bSOscar Mateo lockdep_assert_held(&i915->irq_lock); 306451951ae7SMika Kuoppala 306551951ae7SMika Kuoppala intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 306651951ae7SMika Kuoppala 306751951ae7SMika Kuoppala if (unlikely(!intr_dw)) { 306851951ae7SMika Kuoppala DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 306996606f3bSOscar Mateo return; 307051951ae7SMika Kuoppala } 307151951ae7SMika Kuoppala 307251951ae7SMika Kuoppala for_each_set_bit(bit, &intr_dw, 32) { 3073f744dbc2SMika Kuoppala const u32 ident = gen11_gt_engine_identity(i915, 3074f744dbc2SMika Kuoppala bank, bit); 307551951ae7SMika Kuoppala 3076f744dbc2SMika Kuoppala gen11_gt_identity_handler(i915, ident); 307751951ae7SMika Kuoppala } 307851951ae7SMika Kuoppala 307951951ae7SMika Kuoppala /* Clear must be after shared has been served for engine */ 308051951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 308151951ae7SMika Kuoppala } 308296606f3bSOscar Mateo 308396606f3bSOscar Mateo static void 308496606f3bSOscar Mateo gen11_gt_irq_handler(struct drm_i915_private * const i915, 308596606f3bSOscar Mateo const u32 master_ctl) 308696606f3bSOscar Mateo { 308796606f3bSOscar Mateo unsigned int bank; 308896606f3bSOscar Mateo 308996606f3bSOscar Mateo spin_lock(&i915->irq_lock); 309096606f3bSOscar Mateo 309196606f3bSOscar Mateo for (bank = 0; bank < 2; bank++) { 309296606f3bSOscar Mateo if (master_ctl & GEN11_GT_DW_IRQ(bank)) 309396606f3bSOscar Mateo gen11_gt_bank_handler(i915, bank); 309496606f3bSOscar Mateo } 309596606f3bSOscar Mateo 309696606f3bSOscar Mateo spin_unlock(&i915->irq_lock); 309751951ae7SMika Kuoppala } 309851951ae7SMika Kuoppala 3099df0d28c1SDhinakaran Pandiyan static void 3100df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl, 3101df0d28c1SDhinakaran Pandiyan u32 *iir) 3102df0d28c1SDhinakaran Pandiyan { 3103df0d28c1SDhinakaran Pandiyan void __iomem * const regs = dev_priv->regs; 3104df0d28c1SDhinakaran Pandiyan 3105df0d28c1SDhinakaran Pandiyan if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3106df0d28c1SDhinakaran Pandiyan return; 3107df0d28c1SDhinakaran Pandiyan 3108df0d28c1SDhinakaran Pandiyan *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3109df0d28c1SDhinakaran Pandiyan if (likely(*iir)) 3110df0d28c1SDhinakaran Pandiyan raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir); 3111df0d28c1SDhinakaran Pandiyan } 3112df0d28c1SDhinakaran Pandiyan 3113df0d28c1SDhinakaran Pandiyan static void 3114df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, 3115df0d28c1SDhinakaran Pandiyan const u32 master_ctl, const u32 iir) 3116df0d28c1SDhinakaran Pandiyan { 3117df0d28c1SDhinakaran Pandiyan if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3118df0d28c1SDhinakaran Pandiyan return; 3119df0d28c1SDhinakaran Pandiyan 3120df0d28c1SDhinakaran Pandiyan if (unlikely(!iir)) { 3121df0d28c1SDhinakaran Pandiyan DRM_ERROR("GU_MISC iir blank!\n"); 3122df0d28c1SDhinakaran Pandiyan return; 3123df0d28c1SDhinakaran Pandiyan } 3124df0d28c1SDhinakaran Pandiyan 3125df0d28c1SDhinakaran Pandiyan if (iir & GEN11_GU_MISC_GSE) 3126df0d28c1SDhinakaran Pandiyan intel_opregion_asle_intr(dev_priv); 
3127df0d28c1SDhinakaran Pandiyan else 3128df0d28c1SDhinakaran Pandiyan DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir); 3129df0d28c1SDhinakaran Pandiyan } 3130df0d28c1SDhinakaran Pandiyan 313151951ae7SMika Kuoppala static irqreturn_t gen11_irq_handler(int irq, void *arg) 313251951ae7SMika Kuoppala { 313351951ae7SMika Kuoppala struct drm_i915_private * const i915 = to_i915(arg); 313451951ae7SMika Kuoppala void __iomem * const regs = i915->regs; 313551951ae7SMika Kuoppala u32 master_ctl; 3136df0d28c1SDhinakaran Pandiyan u32 gu_misc_iir; 313751951ae7SMika Kuoppala 313851951ae7SMika Kuoppala if (!intel_irqs_enabled(i915)) 313951951ae7SMika Kuoppala return IRQ_NONE; 314051951ae7SMika Kuoppala 314151951ae7SMika Kuoppala master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 314251951ae7SMika Kuoppala master_ctl &= ~GEN11_MASTER_IRQ; 314351951ae7SMika Kuoppala if (!master_ctl) 314451951ae7SMika Kuoppala return IRQ_NONE; 314551951ae7SMika Kuoppala 314651951ae7SMika Kuoppala /* Disable interrupts. */ 314751951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 314851951ae7SMika Kuoppala 314951951ae7SMika Kuoppala /* Find, clear, then process each source of interrupt. */ 315051951ae7SMika Kuoppala gen11_gt_irq_handler(i915, master_ctl); 315151951ae7SMika Kuoppala 315251951ae7SMika Kuoppala /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 315351951ae7SMika Kuoppala if (master_ctl & GEN11_DISPLAY_IRQ) { 315451951ae7SMika Kuoppala const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 315551951ae7SMika Kuoppala 315651951ae7SMika Kuoppala disable_rpm_wakeref_asserts(i915); 315751951ae7SMika Kuoppala /* 315851951ae7SMika Kuoppala * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 315951951ae7SMika Kuoppala * for the display related bits. 316051951ae7SMika Kuoppala */ 316151951ae7SMika Kuoppala gen8_de_irq_handler(i915, disp_ctl); 316251951ae7SMika Kuoppala enable_rpm_wakeref_asserts(i915); 316351951ae7SMika Kuoppala } 316451951ae7SMika Kuoppala 3165df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir); 3166df0d28c1SDhinakaran Pandiyan 316751951ae7SMika Kuoppala /* Acknowledge and enable interrupts. 
*/ 316851951ae7SMika Kuoppala raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 316951951ae7SMika Kuoppala 3170df0d28c1SDhinakaran Pandiyan gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir); 3171df0d28c1SDhinakaran Pandiyan 317251951ae7SMika Kuoppala return IRQ_HANDLED; 317351951ae7SMika Kuoppala } 317451951ae7SMika Kuoppala 3175ce800754SChris Wilson static void i915_reset_device(struct drm_i915_private *dev_priv, 3176d0667e9cSChris Wilson u32 engine_mask, 3177d0667e9cSChris Wilson const char *reason) 31788a905236SJesse Barnes { 3179ce800754SChris Wilson struct i915_gpu_error *error = &dev_priv->gpu_error; 318091c8a326SChris Wilson struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3181cce723edSBen Widawsky char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3182cce723edSBen Widawsky char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3183cce723edSBen Widawsky char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 318436703e79SChris Wilson struct wedge_me w; 31858a905236SJesse Barnes 3186c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 31878a905236SJesse Barnes 318844d98a61SZhao Yakui DRM_DEBUG_DRIVER("resetting chip\n"); 3189c033666aSChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 31901f83fee0SDaniel Vetter 319136703e79SChris Wilson /* Use a watchdog to ensure that our reset completes */ 319236703e79SChris Wilson i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3193c033666aSChris Wilson intel_prepare_reset(dev_priv); 31947514747dSVille Syrjälä 3195d0667e9cSChris Wilson error->reason = reason; 3196d0667e9cSChris Wilson error->stalled_mask = engine_mask; 3197ce800754SChris Wilson 319836703e79SChris Wilson /* Signal that locked waiters should reset the GPU */ 3199d0667e9cSChris Wilson smp_mb__before_atomic(); 3200ce800754SChris Wilson set_bit(I915_RESET_HANDOFF, &error->flags); 3201ce800754SChris Wilson wake_up_all(&error->wait_queue); 32028c185ecaSChris Wilson 320336703e79SChris Wilson /* Wait for anyone holding the lock to wakeup, without 320436703e79SChris Wilson * blocking indefinitely on struct_mutex. 
320517e1df07SDaniel Vetter */ 320636703e79SChris Wilson do { 3207780f262aSChris Wilson if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3208d0667e9cSChris Wilson i915_reset(dev_priv, engine_mask, reason); 3209221fe799SChris Wilson mutex_unlock(&dev_priv->drm.struct_mutex); 3210780f262aSChris Wilson } 3211ce800754SChris Wilson } while (wait_on_bit_timeout(&error->flags, 32128c185ecaSChris Wilson I915_RESET_HANDOFF, 3213780f262aSChris Wilson TASK_UNINTERRUPTIBLE, 321436703e79SChris Wilson 1)); 3215f69061beSDaniel Vetter 3216d0667e9cSChris Wilson error->stalled_mask = 0; 3217ce800754SChris Wilson error->reason = NULL; 3218ce800754SChris Wilson 3219c033666aSChris Wilson intel_finish_reset(dev_priv); 322036703e79SChris Wilson } 3221f454c694SImre Deak 3222ce800754SChris Wilson if (!test_bit(I915_WEDGED, &error->flags)) 3223ce800754SChris Wilson kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3224f316a42cSBen Gamari } 32258a905236SJesse Barnes 3226eaa14c24SChris Wilson static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3227c0e09200SDave Airlie { 3228eaa14c24SChris Wilson u32 eir; 322963eeaf38SJesse Barnes 3230eaa14c24SChris Wilson if (!IS_GEN2(dev_priv)) 3231eaa14c24SChris Wilson I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 323263eeaf38SJesse Barnes 3233eaa14c24SChris Wilson if (INTEL_GEN(dev_priv) < 4) 3234eaa14c24SChris Wilson I915_WRITE(IPEIR, I915_READ(IPEIR)); 3235eaa14c24SChris Wilson else 3236eaa14c24SChris Wilson I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 32378a905236SJesse Barnes 3238eaa14c24SChris Wilson I915_WRITE(EIR, I915_READ(EIR)); 323963eeaf38SJesse Barnes eir = I915_READ(EIR); 324063eeaf38SJesse Barnes if (eir) { 324163eeaf38SJesse Barnes /* 324263eeaf38SJesse Barnes * some errors might have become stuck, 324363eeaf38SJesse Barnes * mask them. 324463eeaf38SJesse Barnes */ 3245eaa14c24SChris Wilson DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 324663eeaf38SJesse Barnes I915_WRITE(EMR, I915_READ(EMR) | eir); 324778c357ddSVille Syrjälä I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 324863eeaf38SJesse Barnes } 324935aed2e6SChris Wilson } 325035aed2e6SChris Wilson 325135aed2e6SChris Wilson /** 3252b8d24a06SMika Kuoppala * i915_handle_error - handle a gpu error 325314bb2c11STvrtko Ursulin * @dev_priv: i915 device private 325414b730fcSarun.siluvery@linux.intel.com * @engine_mask: mask representing engines that are hung 3255ce800754SChris Wilson * @flags: control flags 325687c390b6SMichel Thierry * @fmt: Error message format string 325787c390b6SMichel Thierry * 3258aafd8581SJavier Martinez Canillas * Do some basic checking of register state at error time and 325935aed2e6SChris Wilson * dump it to the syslog. Also call i915_capture_error_state() to make 326035aed2e6SChris Wilson * sure we get a record and make it available in debugfs. Fire a uevent 326135aed2e6SChris Wilson * so userspace knows something bad happened (should trigger collection 326235aed2e6SChris Wilson * of a ring dump etc.). 326335aed2e6SChris Wilson */ 3264c033666aSChris Wilson void i915_handle_error(struct drm_i915_private *dev_priv, 3265c033666aSChris Wilson u32 engine_mask, 3266ce800754SChris Wilson unsigned long flags, 326758174462SMika Kuoppala const char *fmt, ...) 
326835aed2e6SChris Wilson { 3269142bc7d9SMichel Thierry struct intel_engine_cs *engine; 3270142bc7d9SMichel Thierry unsigned int tmp; 327158174462SMika Kuoppala char error_msg[80]; 3272ce800754SChris Wilson char *msg = NULL; 3273ce800754SChris Wilson 3274ce800754SChris Wilson if (fmt) { 3275ce800754SChris Wilson va_list args; 327635aed2e6SChris Wilson 327758174462SMika Kuoppala va_start(args, fmt); 327858174462SMika Kuoppala vscnprintf(error_msg, sizeof(error_msg), fmt, args); 327958174462SMika Kuoppala va_end(args); 328058174462SMika Kuoppala 3281ce800754SChris Wilson msg = error_msg; 3282ce800754SChris Wilson } 3283ce800754SChris Wilson 32841604a86dSChris Wilson /* 32851604a86dSChris Wilson * In most cases it's guaranteed that we get here with an RPM 32861604a86dSChris Wilson * reference held, for example because there is a pending GPU 32871604a86dSChris Wilson * request that won't finish until the reset is done. This 32881604a86dSChris Wilson * isn't the case at least when we get here by doing a 32891604a86dSChris Wilson * simulated reset via debugfs, so get an RPM reference. 32901604a86dSChris Wilson */ 32911604a86dSChris Wilson intel_runtime_pm_get(dev_priv); 32921604a86dSChris Wilson 3293873d66fbSChris Wilson engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3294ce800754SChris Wilson 3295ce800754SChris Wilson if (flags & I915_ERROR_CAPTURE) { 3296ce800754SChris Wilson i915_capture_error_state(dev_priv, engine_mask, msg); 3297eaa14c24SChris Wilson i915_clear_error_registers(dev_priv); 3298ce800754SChris Wilson } 32998a905236SJesse Barnes 3300142bc7d9SMichel Thierry /* 3301142bc7d9SMichel Thierry * Try engine reset when available. We fall back to full reset if 3302142bc7d9SMichel Thierry * single reset fails. 3303142bc7d9SMichel Thierry */ 3304142bc7d9SMichel Thierry if (intel_has_reset_engine(dev_priv)) { 3305142bc7d9SMichel Thierry for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 33069db529aaSDaniel Vetter BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3307142bc7d9SMichel Thierry if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3308142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3309142bc7d9SMichel Thierry continue; 3310142bc7d9SMichel Thierry 3311ce800754SChris Wilson if (i915_reset_engine(engine, msg) == 0) 3312142bc7d9SMichel Thierry engine_mask &= ~intel_engine_flag(engine); 3313142bc7d9SMichel Thierry 3314142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3315142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3316142bc7d9SMichel Thierry wake_up_bit(&dev_priv->gpu_error.flags, 3317142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id); 3318142bc7d9SMichel Thierry } 3319142bc7d9SMichel Thierry } 3320142bc7d9SMichel Thierry 33218af29b0cSChris Wilson if (!engine_mask) 33221604a86dSChris Wilson goto out; 33238af29b0cSChris Wilson 3324142bc7d9SMichel Thierry /* Full reset needs the mutex, stop any other user trying to do so. */ 3325d5367307SChris Wilson if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3326d5367307SChris Wilson wait_event(dev_priv->gpu_error.reset_queue, 3327d5367307SChris Wilson !test_bit(I915_RESET_BACKOFF, 3328d5367307SChris Wilson &dev_priv->gpu_error.flags)); 33291604a86dSChris Wilson goto out; 3330d5367307SChris Wilson } 3331ba1234d1SBen Gamari 3332142bc7d9SMichel Thierry /* Prevent any other reset-engine attempt. 
*/ 3333142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3334142bc7d9SMichel Thierry while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3335142bc7d9SMichel Thierry &dev_priv->gpu_error.flags)) 3336142bc7d9SMichel Thierry wait_on_bit(&dev_priv->gpu_error.flags, 3337142bc7d9SMichel Thierry I915_RESET_ENGINE + engine->id, 3338142bc7d9SMichel Thierry TASK_UNINTERRUPTIBLE); 3339142bc7d9SMichel Thierry } 3340142bc7d9SMichel Thierry 3341d0667e9cSChris Wilson i915_reset_device(dev_priv, engine_mask, msg); 3342d5367307SChris Wilson 3343142bc7d9SMichel Thierry for_each_engine(engine, dev_priv, tmp) { 3344142bc7d9SMichel Thierry clear_bit(I915_RESET_ENGINE + engine->id, 3345142bc7d9SMichel Thierry &dev_priv->gpu_error.flags); 3346142bc7d9SMichel Thierry } 3347142bc7d9SMichel Thierry 3348d5367307SChris Wilson clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3349d5367307SChris Wilson wake_up_all(&dev_priv->gpu_error.reset_queue); 33501604a86dSChris Wilson 33511604a86dSChris Wilson out: 33521604a86dSChris Wilson intel_runtime_pm_put(dev_priv); 33538a905236SJesse Barnes } 33548a905236SJesse Barnes 335542f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 335642f52ef8SKeith Packard * we use as a pipe index 335742f52ef8SKeith Packard */ 335886e83e35SChris Wilson static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 33590a3e67a4SJesse Barnes { 3360fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3361e9d21d7fSKeith Packard unsigned long irqflags; 336271e0ffa5SJesse Barnes 33631ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 336486e83e35SChris Wilson i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 336586e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 336686e83e35SChris Wilson 336786e83e35SChris Wilson return 0; 336886e83e35SChris Wilson } 336986e83e35SChris Wilson 337086e83e35SChris Wilson static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 337186e83e35SChris Wilson { 337286e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 337386e83e35SChris Wilson unsigned long irqflags; 337486e83e35SChris Wilson 337586e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 33767c463586SKeith Packard i915_enable_pipestat(dev_priv, pipe, 3377755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 33781ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 33798692d00eSChris Wilson 33800a3e67a4SJesse Barnes return 0; 33810a3e67a4SJesse Barnes } 33820a3e67a4SJesse Barnes 338388e72717SThierry Reding static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3384f796cf8fSJesse Barnes { 3385fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3386f796cf8fSJesse Barnes unsigned long irqflags; 338755b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 338886e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3389f796cf8fSJesse Barnes 3390f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3391fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, bit); 3392b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3393b1f14ad0SJesse Barnes 33942e8bf223SDhinakaran Pandiyan /* Even though there is no DMC, frame counter can get stuck when 33952e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated. 
33962e8bf223SDhinakaran Pandiyan */ 33972e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 33982e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 33992e8bf223SDhinakaran Pandiyan 3400b1f14ad0SJesse Barnes return 0; 3401b1f14ad0SJesse Barnes } 3402b1f14ad0SJesse Barnes 340388e72717SThierry Reding static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3404abd58f01SBen Widawsky { 3405fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3406abd58f01SBen Widawsky unsigned long irqflags; 3407abd58f01SBen Widawsky 3408abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3409013d3752SVille Syrjälä bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3410abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3411013d3752SVille Syrjälä 34122e8bf223SDhinakaran Pandiyan /* Even if there is no DMC, frame counter can get stuck when 34132e8bf223SDhinakaran Pandiyan * PSR is active as no frames are generated, so check only for PSR. 34142e8bf223SDhinakaran Pandiyan */ 34152e8bf223SDhinakaran Pandiyan if (HAS_PSR(dev_priv)) 34162e8bf223SDhinakaran Pandiyan drm_vblank_restore(dev, pipe); 34172e8bf223SDhinakaran Pandiyan 3418abd58f01SBen Widawsky return 0; 3419abd58f01SBen Widawsky } 3420abd58f01SBen Widawsky 342142f52ef8SKeith Packard /* Called from drm generic code, passed 'crtc' which 342242f52ef8SKeith Packard * we use as a pipe index 342342f52ef8SKeith Packard */ 342486e83e35SChris Wilson static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 342586e83e35SChris Wilson { 342686e83e35SChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 342786e83e35SChris Wilson unsigned long irqflags; 342886e83e35SChris Wilson 342986e83e35SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 343086e83e35SChris Wilson i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 343186e83e35SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 343286e83e35SChris Wilson } 343386e83e35SChris Wilson 343486e83e35SChris Wilson static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 34350a3e67a4SJesse Barnes { 3436fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3437e9d21d7fSKeith Packard unsigned long irqflags; 34380a3e67a4SJesse Barnes 34391ec14ad3SChris Wilson spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 34407c463586SKeith Packard i915_disable_pipestat(dev_priv, pipe, 3441755e9019SImre Deak PIPE_START_VBLANK_INTERRUPT_STATUS); 34421ec14ad3SChris Wilson spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 34430a3e67a4SJesse Barnes } 34440a3e67a4SJesse Barnes 344588e72717SThierry Reding static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3446f796cf8fSJesse Barnes { 3447fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3448f796cf8fSJesse Barnes unsigned long irqflags; 344955b8f2a7STvrtko Ursulin uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
345086e83e35SChris Wilson DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3451f796cf8fSJesse Barnes 3452f796cf8fSJesse Barnes spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3453fbdedaeaSVille Syrjälä ilk_disable_display_irq(dev_priv, bit); 3454b1f14ad0SJesse Barnes spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3455b1f14ad0SJesse Barnes } 3456b1f14ad0SJesse Barnes 345788e72717SThierry Reding static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3458abd58f01SBen Widawsky { 3459fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3460abd58f01SBen Widawsky unsigned long irqflags; 3461abd58f01SBen Widawsky 3462abd58f01SBen Widawsky spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3463013d3752SVille Syrjälä bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3464abd58f01SBen Widawsky spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3465abd58f01SBen Widawsky } 3466abd58f01SBen Widawsky 3467b243f530STvrtko Ursulin static void ibx_irq_reset(struct drm_i915_private *dev_priv) 346891738a95SPaulo Zanoni { 34696e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 347091738a95SPaulo Zanoni return; 347191738a95SPaulo Zanoni 34723488d4ebSVille Syrjälä GEN3_IRQ_RESET(SDE); 3473105b122eSPaulo Zanoni 34746e266956STvrtko Ursulin if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3475105b122eSPaulo Zanoni I915_WRITE(SERR_INT, 0xffffffff); 3476622364b6SPaulo Zanoni } 3477105b122eSPaulo Zanoni 347891738a95SPaulo Zanoni /* 3479622364b6SPaulo Zanoni * SDEIER is also touched by the interrupt handler to work around missed PCH 3480622364b6SPaulo Zanoni * interrupts. Hence we can't update it after the interrupt handler is enabled - 3481622364b6SPaulo Zanoni * instead we unconditionally enable all PCH interrupt sources here, but then 3482622364b6SPaulo Zanoni * only unmask them as needed with SDEIMR. 3483622364b6SPaulo Zanoni * 3484622364b6SPaulo Zanoni * This function needs to be called before interrupts are enabled. 
348591738a95SPaulo Zanoni */ 3486622364b6SPaulo Zanoni static void ibx_irq_pre_postinstall(struct drm_device *dev) 3487622364b6SPaulo Zanoni { 3488fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3489622364b6SPaulo Zanoni 34906e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 3491622364b6SPaulo Zanoni return; 3492622364b6SPaulo Zanoni 3493622364b6SPaulo Zanoni WARN_ON(I915_READ(SDEIER) != 0); 349491738a95SPaulo Zanoni I915_WRITE(SDEIER, 0xffffffff); 349591738a95SPaulo Zanoni POSTING_READ(SDEIER); 349691738a95SPaulo Zanoni } 349791738a95SPaulo Zanoni 3498b243f530STvrtko Ursulin static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3499d18ea1b5SDaniel Vetter { 35003488d4ebSVille Syrjälä GEN3_IRQ_RESET(GT); 3501b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) 35023488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN6_PM); 3503d18ea1b5SDaniel Vetter } 3504d18ea1b5SDaniel Vetter 350570591a41SVille Syrjälä static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 350670591a41SVille Syrjälä { 350771b8b41dSVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 350871b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 350971b8b41dSVille Syrjälä else 351071b8b41dSVille Syrjälä I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 351171b8b41dSVille Syrjälä 3512ad22d106SVille Syrjälä i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 351370591a41SVille Syrjälä I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 351470591a41SVille Syrjälä 351544d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 351670591a41SVille Syrjälä 35173488d4ebSVille Syrjälä GEN3_IRQ_RESET(VLV_); 35188bd099a7SChris Wilson dev_priv->irq_mask = ~0u; 351970591a41SVille Syrjälä } 352070591a41SVille Syrjälä 35218bb61306SVille Syrjälä static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 35228bb61306SVille Syrjälä { 35238bb61306SVille Syrjälä u32 pipestat_mask; 35249ab981f2SVille Syrjälä u32 enable_mask; 35258bb61306SVille Syrjälä enum pipe pipe; 35268bb61306SVille Syrjälä 3527842ebf7aSVille Syrjälä pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 35288bb61306SVille Syrjälä 35298bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 35308bb61306SVille Syrjälä for_each_pipe(dev_priv, pipe) 35318bb61306SVille Syrjälä i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 35328bb61306SVille Syrjälä 35339ab981f2SVille Syrjälä enable_mask = I915_DISPLAY_PORT_INTERRUPT | 35348bb61306SVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3535ebf5f921SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3536ebf5f921SVille Syrjälä I915_LPE_PIPE_A_INTERRUPT | 3537ebf5f921SVille Syrjälä I915_LPE_PIPE_B_INTERRUPT; 3538ebf5f921SVille Syrjälä 35398bb61306SVille Syrjälä if (IS_CHERRYVIEW(dev_priv)) 3540ebf5f921SVille Syrjälä enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3541ebf5f921SVille Syrjälä I915_LPE_PIPE_C_INTERRUPT; 35426b7eafc1SVille Syrjälä 35438bd099a7SChris Wilson WARN_ON(dev_priv->irq_mask != ~0u); 35446b7eafc1SVille Syrjälä 35459ab981f2SVille Syrjälä dev_priv->irq_mask = ~enable_mask; 35468bb61306SVille Syrjälä 35473488d4ebSVille Syrjälä GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 35488bb61306SVille Syrjälä } 35498bb61306SVille Syrjälä 35508bb61306SVille Syrjälä /* drm_dma.h hooks 35518bb61306SVille Syrjälä */ 35528bb61306SVille Syrjälä static void ironlake_irq_reset(struct drm_device *dev) 35538bb61306SVille Syrjälä { 3554fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 35558bb61306SVille 
Syrjälä 3556d420a50cSVille Syrjälä if (IS_GEN5(dev_priv)) 35578bb61306SVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 35588bb61306SVille Syrjälä 35593488d4ebSVille Syrjälä GEN3_IRQ_RESET(DE); 35605db94019STvrtko Ursulin if (IS_GEN7(dev_priv)) 35618bb61306SVille Syrjälä I915_WRITE(GEN7_ERR_INT, 0xffffffff); 35628bb61306SVille Syrjälä 3563fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 3564fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3565fc340442SDaniel Vetter I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3566fc340442SDaniel Vetter } 3567fc340442SDaniel Vetter 3568b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 35698bb61306SVille Syrjälä 3570b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 35718bb61306SVille Syrjälä } 35728bb61306SVille Syrjälä 35736bcdb1c8SVille Syrjälä static void valleyview_irq_reset(struct drm_device *dev) 35747e231dbeSJesse Barnes { 3575fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 35767e231dbeSJesse Barnes 357734c7b8a7SVille Syrjälä I915_WRITE(VLV_MASTER_IER, 0); 357834c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 357934c7b8a7SVille Syrjälä 3580b243f530STvrtko Ursulin gen5_gt_irq_reset(dev_priv); 35817e231dbeSJesse Barnes 3582ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 35839918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 358470591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3585ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 35867e231dbeSJesse Barnes } 35877e231dbeSJesse Barnes 3588d6e3cca3SDaniel Vetter static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3589d6e3cca3SDaniel Vetter { 3590d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 0); 3591d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 1); 3592d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 2); 3593d6e3cca3SDaniel Vetter GEN8_IRQ_RESET_NDX(GT, 3); 3594d6e3cca3SDaniel Vetter } 3595d6e3cca3SDaniel Vetter 3596823f6b38SPaulo Zanoni static void gen8_irq_reset(struct drm_device *dev) 3597abd58f01SBen Widawsky { 3598fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 3599abd58f01SBen Widawsky int pipe; 3600abd58f01SBen Widawsky 3601abd58f01SBen Widawsky I915_WRITE(GEN8_MASTER_IRQ, 0); 3602abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 3603abd58f01SBen Widawsky 3604d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 3605abd58f01SBen Widawsky 3606e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3607e04f7eceSVille Syrjälä I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3608e04f7eceSVille Syrjälä 3609055e393fSDamien Lespiau for_each_pipe(dev_priv, pipe) 3610f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 3611813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 3612f86f3fb0SPaulo Zanoni GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3613abd58f01SBen Widawsky 36143488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_PORT_); 36153488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_DE_MISC_); 36163488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 3617abd58f01SBen Widawsky 36186e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 3619b243f530STvrtko Ursulin ibx_irq_reset(dev_priv); 3620abd58f01SBen Widawsky } 3621abd58f01SBen Widawsky 362251951ae7SMika Kuoppala static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 362351951ae7SMika Kuoppala { 362451951ae7SMika Kuoppala /* Disable RCS, BCS, VCS and VECS class engines. 
*/ 362551951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 362651951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 362751951ae7SMika Kuoppala 362851951ae7SMika Kuoppala /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 362951951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 363051951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 363151951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 363251951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 363351951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3634d02b98b8SOscar Mateo 3635d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3636d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 363751951ae7SMika Kuoppala } 363851951ae7SMika Kuoppala 363951951ae7SMika Kuoppala static void gen11_irq_reset(struct drm_device *dev) 364051951ae7SMika Kuoppala { 364151951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 364251951ae7SMika Kuoppala int pipe; 364351951ae7SMika Kuoppala 364451951ae7SMika Kuoppala I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 364551951ae7SMika Kuoppala POSTING_READ(GEN11_GFX_MSTR_IRQ); 364651951ae7SMika Kuoppala 364751951ae7SMika Kuoppala gen11_gt_irq_reset(dev_priv); 364851951ae7SMika Kuoppala 364951951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 365051951ae7SMika Kuoppala 365151951ae7SMika Kuoppala for_each_pipe(dev_priv, pipe) 365251951ae7SMika Kuoppala if (intel_display_power_is_enabled(dev_priv, 365351951ae7SMika Kuoppala POWER_DOMAIN_PIPE(pipe))) 365451951ae7SMika Kuoppala GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 365551951ae7SMika Kuoppala 365651951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_PORT_); 365751951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_DE_MISC_); 3658121e758eSDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_DE_HPD_); 3659df0d28c1SDhinakaran Pandiyan GEN3_IRQ_RESET(GEN11_GU_MISC_); 366051951ae7SMika Kuoppala GEN3_IRQ_RESET(GEN8_PCU_); 366131604222SAnusha Srivatsa 366231604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 366331604222SAnusha Srivatsa GEN3_IRQ_RESET(SDE); 366451951ae7SMika Kuoppala } 366551951ae7SMika Kuoppala 36664c6c03beSDamien Lespiau void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3667001bd2cbSImre Deak u8 pipe_mask) 3668d49bdb0eSPaulo Zanoni { 36691180e206SPaulo Zanoni uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 36706831f3e3SVille Syrjälä enum pipe pipe; 3671d49bdb0eSPaulo Zanoni 367213321786SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 36739dfe2e3aSImre Deak 36749dfe2e3aSImre Deak if (!intel_irqs_enabled(dev_priv)) { 36759dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 36769dfe2e3aSImre Deak return; 36779dfe2e3aSImre Deak } 36789dfe2e3aSImre Deak 36796831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 36806831f3e3SVille Syrjälä GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 36816831f3e3SVille Syrjälä dev_priv->de_irq_mask[pipe], 36826831f3e3SVille Syrjälä ~dev_priv->de_irq_mask[pipe] | extra_ier); 36839dfe2e3aSImre Deak 368413321786SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 3685d49bdb0eSPaulo Zanoni } 3686d49bdb0eSPaulo Zanoni 3687aae8ba84SVille Syrjälä void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3688001bd2cbSImre Deak u8 pipe_mask) 3689aae8ba84SVille Syrjälä { 36906831f3e3SVille Syrjälä enum pipe pipe; 36916831f3e3SVille Syrjälä 3692aae8ba84SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 36939dfe2e3aSImre Deak 36949dfe2e3aSImre Deak if 
(!intel_irqs_enabled(dev_priv)) { 36959dfe2e3aSImre Deak spin_unlock_irq(&dev_priv->irq_lock); 36969dfe2e3aSImre Deak return; 36979dfe2e3aSImre Deak } 36989dfe2e3aSImre Deak 36996831f3e3SVille Syrjälä for_each_pipe_masked(dev_priv, pipe, pipe_mask) 37006831f3e3SVille Syrjälä GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 37019dfe2e3aSImre Deak 3702aae8ba84SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 3703aae8ba84SVille Syrjälä 3704aae8ba84SVille Syrjälä /* make sure we're done processing display irqs */ 370591c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq); 3706aae8ba84SVille Syrjälä } 3707aae8ba84SVille Syrjälä 37086bcdb1c8SVille Syrjälä static void cherryview_irq_reset(struct drm_device *dev) 370943f328d7SVille Syrjälä { 3710fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 371143f328d7SVille Syrjälä 371243f328d7SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, 0); 371343f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 371443f328d7SVille Syrjälä 3715d6e3cca3SDaniel Vetter gen8_gt_irq_reset(dev_priv); 371643f328d7SVille Syrjälä 37173488d4ebSVille Syrjälä GEN3_IRQ_RESET(GEN8_PCU_); 371843f328d7SVille Syrjälä 3719ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 37209918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 372170591a41SVille Syrjälä vlv_display_irq_reset(dev_priv); 3722ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 372343f328d7SVille Syrjälä } 372443f328d7SVille Syrjälä 372591d14251STvrtko Ursulin static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 372687a02106SVille Syrjälä const u32 hpd[HPD_NUM_PINS]) 372787a02106SVille Syrjälä { 372887a02106SVille Syrjälä struct intel_encoder *encoder; 372987a02106SVille Syrjälä u32 enabled_irqs = 0; 373087a02106SVille Syrjälä 373191c8a326SChris Wilson for_each_intel_encoder(&dev_priv->drm, encoder) 373287a02106SVille Syrjälä if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 373387a02106SVille Syrjälä enabled_irqs |= hpd[encoder->hpd_pin]; 373487a02106SVille Syrjälä 373587a02106SVille Syrjälä return enabled_irqs; 373687a02106SVille Syrjälä } 373787a02106SVille Syrjälä 37381a56b1a2SImre Deak static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 37391a56b1a2SImre Deak { 37401a56b1a2SImre Deak u32 hotplug; 37411a56b1a2SImre Deak 37421a56b1a2SImre Deak /* 37431a56b1a2SImre Deak * Enable digital hotplug on the PCH, and configure the DP short pulse 37441a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec). 37451a56b1a2SImre Deak * The pulse duration bits are reserved on LPT+. 37461a56b1a2SImre Deak */ 37471a56b1a2SImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 37481a56b1a2SImre Deak hotplug &= ~(PORTB_PULSE_DURATION_MASK | 37491a56b1a2SImre Deak PORTC_PULSE_DURATION_MASK | 37501a56b1a2SImre Deak PORTD_PULSE_DURATION_MASK); 37511a56b1a2SImre Deak hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 37521a56b1a2SImre Deak hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 37531a56b1a2SImre Deak hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 37541a56b1a2SImre Deak /* 37551a56b1a2SImre Deak * When CPU and PCH are on the same package, port A 37561a56b1a2SImre Deak * HPD must be enabled in both north and south. 
37571a56b1a2SImre Deak */ 37581a56b1a2SImre Deak if (HAS_PCH_LPT_LP(dev_priv)) 37591a56b1a2SImre Deak hotplug |= PORTA_HOTPLUG_ENABLE; 37601a56b1a2SImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 37611a56b1a2SImre Deak } 37621a56b1a2SImre Deak 376391d14251STvrtko Ursulin static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 376482a28bcfSDaniel Vetter { 37651a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 376682a28bcfSDaniel Vetter 376791d14251STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) { 3768fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK; 376991d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 377082a28bcfSDaniel Vetter } else { 3771fee884edSDaniel Vetter hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 377291d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 377382a28bcfSDaniel Vetter } 377482a28bcfSDaniel Vetter 3775fee884edSDaniel Vetter ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 377682a28bcfSDaniel Vetter 37771a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 37786dbf30ceSVille Syrjälä } 377926951cafSXiong Zhang 378031604222SAnusha Srivatsa static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 378131604222SAnusha Srivatsa { 378231604222SAnusha Srivatsa u32 hotplug; 378331604222SAnusha Srivatsa 378431604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_DDI); 378531604222SAnusha Srivatsa hotplug |= ICP_DDIA_HPD_ENABLE | 378631604222SAnusha Srivatsa ICP_DDIB_HPD_ENABLE; 378731604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 378831604222SAnusha Srivatsa 378931604222SAnusha Srivatsa hotplug = I915_READ(SHOTPLUG_CTL_TC); 379031604222SAnusha Srivatsa hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 379131604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC2) | 379231604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC3) | 379331604222SAnusha Srivatsa ICP_TC_HPD_ENABLE(PORT_TC4); 379431604222SAnusha Srivatsa I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 379531604222SAnusha Srivatsa } 379631604222SAnusha Srivatsa 379731604222SAnusha Srivatsa static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 379831604222SAnusha Srivatsa { 379931604222SAnusha Srivatsa u32 hotplug_irqs, enabled_irqs; 380031604222SAnusha Srivatsa 380131604222SAnusha Srivatsa hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 380231604222SAnusha Srivatsa enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 380331604222SAnusha Srivatsa 380431604222SAnusha Srivatsa ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 380531604222SAnusha Srivatsa 380631604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 380731604222SAnusha Srivatsa } 380831604222SAnusha Srivatsa 3809121e758eSDhinakaran Pandiyan static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3810121e758eSDhinakaran Pandiyan { 3811121e758eSDhinakaran Pandiyan u32 hotplug; 3812121e758eSDhinakaran Pandiyan 3813121e758eSDhinakaran Pandiyan hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3814121e758eSDhinakaran Pandiyan hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3815121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3816121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3817121e758eSDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3818121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3819b796b971SDhinakaran Pandiyan 3820b796b971SDhinakaran Pandiyan hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3821b796b971SDhinakaran Pandiyan hotplug |= 
GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3822b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3823b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3824b796b971SDhinakaran Pandiyan GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3825b796b971SDhinakaran Pandiyan I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3826121e758eSDhinakaran Pandiyan } 3827121e758eSDhinakaran Pandiyan 3828121e758eSDhinakaran Pandiyan static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3829121e758eSDhinakaran Pandiyan { 3830121e758eSDhinakaran Pandiyan u32 hotplug_irqs, enabled_irqs; 3831121e758eSDhinakaran Pandiyan u32 val; 3832121e758eSDhinakaran Pandiyan 3833b796b971SDhinakaran Pandiyan enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3834b796b971SDhinakaran Pandiyan hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3835121e758eSDhinakaran Pandiyan 3836121e758eSDhinakaran Pandiyan val = I915_READ(GEN11_DE_HPD_IMR); 3837121e758eSDhinakaran Pandiyan val &= ~hotplug_irqs; 3838121e758eSDhinakaran Pandiyan I915_WRITE(GEN11_DE_HPD_IMR, val); 3839121e758eSDhinakaran Pandiyan POSTING_READ(GEN11_DE_HPD_IMR); 3840121e758eSDhinakaran Pandiyan 3841121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 384231604222SAnusha Srivatsa 384331604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 384431604222SAnusha Srivatsa icp_hpd_irq_setup(dev_priv); 3845121e758eSDhinakaran Pandiyan } 3846121e758eSDhinakaran Pandiyan 38472a57d9ccSImre Deak static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 38482a57d9ccSImre Deak { 38493b92e263SRodrigo Vivi u32 val, hotplug; 38503b92e263SRodrigo Vivi 38513b92e263SRodrigo Vivi /* Display WA #1179 WaHardHangonHotPlug: cnp */ 38523b92e263SRodrigo Vivi if (HAS_PCH_CNP(dev_priv)) { 38533b92e263SRodrigo Vivi val = I915_READ(SOUTH_CHICKEN1); 38543b92e263SRodrigo Vivi val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 38553b92e263SRodrigo Vivi val |= CHASSIS_CLK_REQ_DURATION(0xf); 38563b92e263SRodrigo Vivi I915_WRITE(SOUTH_CHICKEN1, val); 38573b92e263SRodrigo Vivi } 38582a57d9ccSImre Deak 38592a57d9ccSImre Deak /* Enable digital hotplug on the PCH */ 38602a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG); 38612a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 38622a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 38632a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE | 38642a57d9ccSImre Deak PORTD_HOTPLUG_ENABLE; 38652a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 38662a57d9ccSImre Deak 38672a57d9ccSImre Deak hotplug = I915_READ(PCH_PORT_HOTPLUG2); 38682a57d9ccSImre Deak hotplug |= PORTE_HOTPLUG_ENABLE; 38692a57d9ccSImre Deak I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 38702a57d9ccSImre Deak } 38712a57d9ccSImre Deak 387291d14251STvrtko Ursulin static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 38736dbf30ceSVille Syrjälä { 38742a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 38756dbf30ceSVille Syrjälä 38766dbf30ceSVille Syrjälä hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 387791d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 38786dbf30ceSVille Syrjälä 38796dbf30ceSVille Syrjälä ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 38806dbf30ceSVille Syrjälä 38812a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 388226951cafSXiong Zhang } 38837fe0b973SKeith Packard 38841a56b1a2SImre Deak static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 38851a56b1a2SImre Deak { 38861a56b1a2SImre Deak u32 hotplug; 38871a56b1a2SImre Deak 38881a56b1a2SImre Deak /* 38891a56b1a2SImre 
Deak * Enable digital hotplug on the CPU, and configure the DP short pulse 38901a56b1a2SImre Deak * duration to 2ms (which is the minimum in the Display Port spec) 38911a56b1a2SImre Deak * The pulse duration bits are reserved on HSW+. 38921a56b1a2SImre Deak */ 38931a56b1a2SImre Deak hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 38941a56b1a2SImre Deak hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 38951a56b1a2SImre Deak hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 38961a56b1a2SImre Deak DIGITAL_PORTA_PULSE_DURATION_2ms; 38971a56b1a2SImre Deak I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 38981a56b1a2SImre Deak } 38991a56b1a2SImre Deak 390091d14251STvrtko Ursulin static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3901e4ce95aaSVille Syrjälä { 39021a56b1a2SImre Deak u32 hotplug_irqs, enabled_irqs; 3903e4ce95aaSVille Syrjälä 390491d14251STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 8) { 39053a3b3c7dSVille Syrjälä hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 390691d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 39073a3b3c7dSVille Syrjälä 39083a3b3c7dSVille Syrjälä bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 390991d14251STvrtko Ursulin } else if (INTEL_GEN(dev_priv) >= 7) { 391023bb4cb5SVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 391191d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 39123a3b3c7dSVille Syrjälä 39133a3b3c7dSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 391423bb4cb5SVille Syrjälä } else { 3915e4ce95aaSVille Syrjälä hotplug_irqs = DE_DP_A_HOTPLUG; 391691d14251STvrtko Ursulin enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3917e4ce95aaSVille Syrjälä 3918e4ce95aaSVille Syrjälä ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 39193a3b3c7dSVille Syrjälä } 3920e4ce95aaSVille Syrjälä 39211a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 3922e4ce95aaSVille Syrjälä 392391d14251STvrtko Ursulin ibx_hpd_irq_setup(dev_priv); 3924e4ce95aaSVille Syrjälä } 3925e4ce95aaSVille Syrjälä 39262a57d9ccSImre Deak static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 39272a57d9ccSImre Deak u32 enabled_irqs) 3928e0a20ad7SShashank Sharma { 39292a57d9ccSImre Deak u32 hotplug; 3930e0a20ad7SShashank Sharma 3931a52bb15bSVille Syrjälä hotplug = I915_READ(PCH_PORT_HOTPLUG); 39322a57d9ccSImre Deak hotplug |= PORTA_HOTPLUG_ENABLE | 39332a57d9ccSImre Deak PORTB_HOTPLUG_ENABLE | 39342a57d9ccSImre Deak PORTC_HOTPLUG_ENABLE; 3935d252bf68SShubhangi Shrivastava 3936d252bf68SShubhangi Shrivastava DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3937d252bf68SShubhangi Shrivastava hotplug, enabled_irqs); 3938d252bf68SShubhangi Shrivastava hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3939d252bf68SShubhangi Shrivastava 3940d252bf68SShubhangi Shrivastava /* 3941d252bf68SShubhangi Shrivastava * For BXT invert bit has to be set based on AOB design 3942d252bf68SShubhangi Shrivastava * for HPD detection logic, update it based on VBT fields. 
3943d252bf68SShubhangi Shrivastava */ 3944d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3945d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3946d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIA_HPD_INVERT; 3947d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3948d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3949d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIB_HPD_INVERT; 3950d252bf68SShubhangi Shrivastava if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3951d252bf68SShubhangi Shrivastava intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3952d252bf68SShubhangi Shrivastava hotplug |= BXT_DDIC_HPD_INVERT; 3953d252bf68SShubhangi Shrivastava 3954a52bb15bSVille Syrjälä I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3955e0a20ad7SShashank Sharma } 3956e0a20ad7SShashank Sharma 39572a57d9ccSImre Deak static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 39582a57d9ccSImre Deak { 39592a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 39602a57d9ccSImre Deak } 39612a57d9ccSImre Deak 39622a57d9ccSImre Deak static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 39632a57d9ccSImre Deak { 39642a57d9ccSImre Deak u32 hotplug_irqs, enabled_irqs; 39652a57d9ccSImre Deak 39662a57d9ccSImre Deak enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 39672a57d9ccSImre Deak hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 39682a57d9ccSImre Deak 39692a57d9ccSImre Deak bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 39702a57d9ccSImre Deak 39712a57d9ccSImre Deak __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 39722a57d9ccSImre Deak } 39732a57d9ccSImre Deak 3974d46da437SPaulo Zanoni static void ibx_irq_postinstall(struct drm_device *dev) 3975d46da437SPaulo Zanoni { 3976fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 397782a28bcfSDaniel Vetter u32 mask; 3978d46da437SPaulo Zanoni 39796e266956STvrtko Ursulin if (HAS_PCH_NOP(dev_priv)) 3980692a04cfSDaniel Vetter return; 3981692a04cfSDaniel Vetter 39826e266956STvrtko Ursulin if (HAS_PCH_IBX(dev_priv)) 39835c673b60SDaniel Vetter mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 39844ebc6509SDhinakaran Pandiyan else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 39855c673b60SDaniel Vetter mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 39864ebc6509SDhinakaran Pandiyan else 39874ebc6509SDhinakaran Pandiyan mask = SDE_GMBUS_CPT; 39888664281bSPaulo Zanoni 39893488d4ebSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3990d46da437SPaulo Zanoni I915_WRITE(SDEIMR, ~mask); 39912a57d9ccSImre Deak 39922a57d9ccSImre Deak if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 39932a57d9ccSImre Deak HAS_PCH_LPT(dev_priv)) 39941a56b1a2SImre Deak ibx_hpd_detection_setup(dev_priv); 39952a57d9ccSImre Deak else 39962a57d9ccSImre Deak spt_hpd_detection_setup(dev_priv); 3997d46da437SPaulo Zanoni } 3998d46da437SPaulo Zanoni 39990a9a8c91SDaniel Vetter static void gen5_gt_irq_postinstall(struct drm_device *dev) 40000a9a8c91SDaniel Vetter { 4001fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40020a9a8c91SDaniel Vetter u32 pm_irqs, gt_irqs; 40030a9a8c91SDaniel Vetter 40040a9a8c91SDaniel Vetter pm_irqs = gt_irqs = 0; 40050a9a8c91SDaniel Vetter 40060a9a8c91SDaniel Vetter dev_priv->gt_irq_mask = ~0; 40073c9192bcSTvrtko Ursulin if (HAS_L3_DPF(dev_priv)) { 40080a9a8c91SDaniel Vetter /* L3 parity interrupt is always unmasked. 
*/ 4009772c2a51STvrtko Ursulin dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4010772c2a51STvrtko Ursulin gt_irqs |= GT_PARITY_ERROR(dev_priv); 40110a9a8c91SDaniel Vetter } 40120a9a8c91SDaniel Vetter 40130a9a8c91SDaniel Vetter gt_irqs |= GT_RENDER_USER_INTERRUPT; 40145db94019STvrtko Ursulin if (IS_GEN5(dev_priv)) { 4015f8973c21SChris Wilson gt_irqs |= ILK_BSD_USER_INTERRUPT; 40160a9a8c91SDaniel Vetter } else { 40170a9a8c91SDaniel Vetter gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 40180a9a8c91SDaniel Vetter } 40190a9a8c91SDaniel Vetter 40203488d4ebSVille Syrjälä GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 40210a9a8c91SDaniel Vetter 4022b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 6) { 402378e68d36SImre Deak /* 402478e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS 402578e68d36SImre Deak * itself is enabled/disabled. 402678e68d36SImre Deak */ 4027f4e9af4fSAkash Goel if (HAS_VEBOX(dev_priv)) { 40280a9a8c91SDaniel Vetter pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4029f4e9af4fSAkash Goel dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4030f4e9af4fSAkash Goel } 40310a9a8c91SDaniel Vetter 4032f4e9af4fSAkash Goel dev_priv->pm_imr = 0xffffffff; 40333488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 40340a9a8c91SDaniel Vetter } 40350a9a8c91SDaniel Vetter } 40360a9a8c91SDaniel Vetter 4037f71d4af4SJesse Barnes static int ironlake_irq_postinstall(struct drm_device *dev) 4038036a4a7dSZhenyu Wang { 4039fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 40408e76f8dcSPaulo Zanoni u32 display_mask, extra_mask; 40418e76f8dcSPaulo Zanoni 4042b243f530STvrtko Ursulin if (INTEL_GEN(dev_priv) >= 7) { 40438e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4044842ebf7aSVille Syrjälä DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 40458e76f8dcSPaulo Zanoni extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 404623bb4cb5SVille Syrjälä DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 404723bb4cb5SVille Syrjälä DE_DP_A_HOTPLUG_IVB); 40488e76f8dcSPaulo Zanoni } else { 40498e76f8dcSPaulo Zanoni display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4050842ebf7aSVille Syrjälä DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4051842ebf7aSVille Syrjälä DE_PIPEA_CRC_DONE | DE_POISON); 4052e4ce95aaSVille Syrjälä extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4053e4ce95aaSVille Syrjälä DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4054e4ce95aaSVille Syrjälä DE_DP_A_HOTPLUG); 40558e76f8dcSPaulo Zanoni } 4056036a4a7dSZhenyu Wang 4057fc340442SDaniel Vetter if (IS_HASWELL(dev_priv)) { 4058fc340442SDaniel Vetter gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 405954fd3149SDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4060fc340442SDaniel Vetter display_mask |= DE_EDP_PSR_INT_HSW; 4061fc340442SDaniel Vetter } 4062fc340442SDaniel Vetter 40631ec14ad3SChris Wilson dev_priv->irq_mask = ~display_mask; 4064036a4a7dSZhenyu Wang 4065622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4066622364b6SPaulo Zanoni 40673488d4ebSVille Syrjälä GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4068036a4a7dSZhenyu Wang 40690a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 4070036a4a7dSZhenyu Wang 40711a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 40721a56b1a2SImre Deak 4073d46da437SPaulo Zanoni ibx_irq_postinstall(dev); 40747fe0b973SKeith Packard 407550a0bc90STvrtko Ursulin if (IS_IRONLAKE_M(dev_priv)) { 40766005ce42SDaniel Vetter /* Enable PCU event interrupts 
40776005ce42SDaniel Vetter * 40786005ce42SDaniel Vetter * spinlocking not required here for correctness since interrupt 40794bc9d430SDaniel Vetter * setup is guaranteed to run in single-threaded context. But we 40804bc9d430SDaniel Vetter * need it to make the assert_spin_locked happy. */ 4081d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4082fbdedaeaSVille Syrjälä ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4083d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4084f97108d1SJesse Barnes } 4085f97108d1SJesse Barnes 4086036a4a7dSZhenyu Wang return 0; 4087036a4a7dSZhenyu Wang } 4088036a4a7dSZhenyu Wang 4089f8b79e58SImre Deak void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4090f8b79e58SImre Deak { 409167520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4092f8b79e58SImre Deak 4093f8b79e58SImre Deak if (dev_priv->display_irqs_enabled) 4094f8b79e58SImre Deak return; 4095f8b79e58SImre Deak 4096f8b79e58SImre Deak dev_priv->display_irqs_enabled = true; 4097f8b79e58SImre Deak 4098d6c69803SVille Syrjälä if (intel_irqs_enabled(dev_priv)) { 4099d6c69803SVille Syrjälä vlv_display_irq_reset(dev_priv); 4100ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4101f8b79e58SImre Deak } 4102d6c69803SVille Syrjälä } 4103f8b79e58SImre Deak 4104f8b79e58SImre Deak void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4105f8b79e58SImre Deak { 410667520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4107f8b79e58SImre Deak 4108f8b79e58SImre Deak if (!dev_priv->display_irqs_enabled) 4109f8b79e58SImre Deak return; 4110f8b79e58SImre Deak 4111f8b79e58SImre Deak dev_priv->display_irqs_enabled = false; 4112f8b79e58SImre Deak 4113950eabafSImre Deak if (intel_irqs_enabled(dev_priv)) 4114ad22d106SVille Syrjälä vlv_display_irq_reset(dev_priv); 4115f8b79e58SImre Deak } 4116f8b79e58SImre Deak 41170e6c9a9eSVille Syrjälä 41180e6c9a9eSVille Syrjälä static int valleyview_irq_postinstall(struct drm_device *dev) 41190e6c9a9eSVille Syrjälä { 4120fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 41210e6c9a9eSVille Syrjälä 41220a9a8c91SDaniel Vetter gen5_gt_irq_postinstall(dev); 41237e231dbeSJesse Barnes 4124ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 41259918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4126ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4127ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4128ad22d106SVille Syrjälä 41297e231dbeSJesse Barnes I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 413034c7b8a7SVille Syrjälä POSTING_READ(VLV_MASTER_IER); 413120afbda2SDaniel Vetter 413220afbda2SDaniel Vetter return 0; 413320afbda2SDaniel Vetter } 413420afbda2SDaniel Vetter 4135abd58f01SBen Widawsky static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4136abd58f01SBen Widawsky { 4137abd58f01SBen Widawsky /* These are interrupts we'll toggle with the ring mask register */ 4138abd58f01SBen Widawsky uint32_t gt_interrupts[] = { 4139abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 414073d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 414173d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 414273d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 4143abd58f01SBen Widawsky GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 414473d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 414573d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << 
GEN8_VCS2_IRQ_SHIFT | 414673d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 4147abd58f01SBen Widawsky 0, 414873d477f6SOscar Mateo GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 414973d477f6SOscar Mateo GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 4150abd58f01SBen Widawsky }; 4151abd58f01SBen Widawsky 415298735739STvrtko Ursulin if (HAS_L3_DPF(dev_priv)) 415398735739STvrtko Ursulin gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 415498735739STvrtko Ursulin 4155f4e9af4fSAkash Goel dev_priv->pm_ier = 0x0; 4156f4e9af4fSAkash Goel dev_priv->pm_imr = ~dev_priv->pm_ier; 41579a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 41589a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 415978e68d36SImre Deak /* 416078e68d36SImre Deak * RPS interrupts will get enabled/disabled on demand when RPS itself 416126705e20SSagar Arun Kamble * is enabled/disabled. Same wil be the case for GuC interrupts. 416278e68d36SImre Deak */ 4163f4e9af4fSAkash Goel GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 41649a2d2d87SDeepak S GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4165abd58f01SBen Widawsky } 4166abd58f01SBen Widawsky 4167abd58f01SBen Widawsky static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4168abd58f01SBen Widawsky { 4169770de83dSDamien Lespiau uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4170770de83dSDamien Lespiau uint32_t de_pipe_enables; 41713a3b3c7dSVille Syrjälä u32 de_port_masked = GEN8_AUX_CHANNEL_A; 41723a3b3c7dSVille Syrjälä u32 de_port_enables; 4173df0d28c1SDhinakaran Pandiyan u32 de_misc_masked = GEN8_DE_EDP_PSR; 41743a3b3c7dSVille Syrjälä enum pipe pipe; 4175770de83dSDamien Lespiau 4176df0d28c1SDhinakaran Pandiyan if (INTEL_GEN(dev_priv) <= 10) 4177df0d28c1SDhinakaran Pandiyan de_misc_masked |= GEN8_DE_MISC_GSE; 4178df0d28c1SDhinakaran Pandiyan 4179bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 9) { 4180842ebf7aSVille Syrjälä de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 41813a3b3c7dSVille Syrjälä de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 418288e04703SJesse Barnes GEN9_AUX_CHANNEL_D; 4183cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 41843a3b3c7dSVille Syrjälä de_port_masked |= BXT_DE_PORT_GMBUS; 41853a3b3c7dSVille Syrjälä } else { 4186842ebf7aSVille Syrjälä de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 41873a3b3c7dSVille Syrjälä } 4188770de83dSDamien Lespiau 4189bb187e93SJames Ausmus if (INTEL_GEN(dev_priv) >= 11) 4190bb187e93SJames Ausmus de_port_masked |= ICL_AUX_CHANNEL_E; 4191bb187e93SJames Ausmus 41929bb635d9SDhinakaran Pandiyan if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4193a324fcacSRodrigo Vivi de_port_masked |= CNL_AUX_CHANNEL_F; 4194a324fcacSRodrigo Vivi 4195770de83dSDamien Lespiau de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4196770de83dSDamien Lespiau GEN8_PIPE_FIFO_UNDERRUN; 4197770de83dSDamien Lespiau 41983a3b3c7dSVille Syrjälä de_port_enables = de_port_masked; 4199cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4200a52bb15bSVille Syrjälä de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4201a52bb15bSVille Syrjälä else if (IS_BROADWELL(dev_priv)) 42023a3b3c7dSVille Syrjälä de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 42033a3b3c7dSVille Syrjälä 4204e04f7eceSVille Syrjälä gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 420554fd3149SDhinakaran Pandiyan intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4206e04f7eceSVille Syrjälä 
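	/*
	 * Pipe interrupts are only programmed here for pipes whose power well
	 * is currently enabled; pipes that are powered down have their
	 * DE_PIPE registers initialized later, via
	 * gen8_irq_power_well_post_enable(), when the power well comes up.
	 */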
42070a195c02SMika Kahola for_each_pipe(dev_priv, pipe) { 42080a195c02SMika Kahola dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4209abd58f01SBen Widawsky 4210f458ebbcSDaniel Vetter if (intel_display_power_is_enabled(dev_priv, 4211813bde43SPaulo Zanoni POWER_DOMAIN_PIPE(pipe))) 4212813bde43SPaulo Zanoni GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 4213813bde43SPaulo Zanoni dev_priv->de_irq_mask[pipe], 421435079899SPaulo Zanoni de_pipe_enables); 42150a195c02SMika Kahola } 4216abd58f01SBen Widawsky 42173488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 42183488d4ebSVille Syrjälä GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 42192a57d9ccSImre Deak 4220121e758eSDhinakaran Pandiyan if (INTEL_GEN(dev_priv) >= 11) { 4221121e758eSDhinakaran Pandiyan u32 de_hpd_masked = 0; 4222b796b971SDhinakaran Pandiyan u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4223b796b971SDhinakaran Pandiyan GEN11_DE_TBT_HOTPLUG_MASK; 4224121e758eSDhinakaran Pandiyan 4225121e758eSDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); 4226121e758eSDhinakaran Pandiyan gen11_hpd_detection_setup(dev_priv); 4227121e758eSDhinakaran Pandiyan } else if (IS_GEN9_LP(dev_priv)) { 42282a57d9ccSImre Deak bxt_hpd_detection_setup(dev_priv); 4229121e758eSDhinakaran Pandiyan } else if (IS_BROADWELL(dev_priv)) { 42301a56b1a2SImre Deak ilk_hpd_detection_setup(dev_priv); 4231abd58f01SBen Widawsky } 4232121e758eSDhinakaran Pandiyan } 4233abd58f01SBen Widawsky 4234abd58f01SBen Widawsky static int gen8_irq_postinstall(struct drm_device *dev) 4235abd58f01SBen Widawsky { 4236fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4237abd58f01SBen Widawsky 42386e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4239622364b6SPaulo Zanoni ibx_irq_pre_postinstall(dev); 4240622364b6SPaulo Zanoni 4241abd58f01SBen Widawsky gen8_gt_irq_postinstall(dev_priv); 4242abd58f01SBen Widawsky gen8_de_irq_postinstall(dev_priv); 4243abd58f01SBen Widawsky 42446e266956STvrtko Ursulin if (HAS_PCH_SPLIT(dev_priv)) 4245abd58f01SBen Widawsky ibx_irq_postinstall(dev); 4246abd58f01SBen Widawsky 4247e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4248abd58f01SBen Widawsky POSTING_READ(GEN8_MASTER_IRQ); 4249abd58f01SBen Widawsky 4250abd58f01SBen Widawsky return 0; 4251abd58f01SBen Widawsky } 4252abd58f01SBen Widawsky 425351951ae7SMika Kuoppala static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 425451951ae7SMika Kuoppala { 425551951ae7SMika Kuoppala const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 425651951ae7SMika Kuoppala 425751951ae7SMika Kuoppala BUILD_BUG_ON(irqs & 0xffff0000); 425851951ae7SMika Kuoppala 425951951ae7SMika Kuoppala /* Enable RCS, BCS, VCS and VECS class interrupts. */ 426051951ae7SMika Kuoppala I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 426151951ae7SMika Kuoppala I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 426251951ae7SMika Kuoppala 426351951ae7SMika Kuoppala /* Unmask irqs on RCS, BCS, VCS and VECS engines. 
*/ 426451951ae7SMika Kuoppala I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 426551951ae7SMika Kuoppala I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 426651951ae7SMika Kuoppala I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 426751951ae7SMika Kuoppala I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 426851951ae7SMika Kuoppala I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 426951951ae7SMika Kuoppala 4270d02b98b8SOscar Mateo /* 4271d02b98b8SOscar Mateo * RPS interrupts will get enabled/disabled on demand when RPS itself 4272d02b98b8SOscar Mateo * is enabled/disabled. 4273d02b98b8SOscar Mateo */ 4274d02b98b8SOscar Mateo dev_priv->pm_ier = 0x0; 4275d02b98b8SOscar Mateo dev_priv->pm_imr = ~dev_priv->pm_ier; 4276d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4277d02b98b8SOscar Mateo I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 427851951ae7SMika Kuoppala } 427951951ae7SMika Kuoppala 428031604222SAnusha Srivatsa static void icp_irq_postinstall(struct drm_device *dev) 428131604222SAnusha Srivatsa { 428231604222SAnusha Srivatsa struct drm_i915_private *dev_priv = to_i915(dev); 428331604222SAnusha Srivatsa u32 mask = SDE_GMBUS_ICP; 428431604222SAnusha Srivatsa 428531604222SAnusha Srivatsa WARN_ON(I915_READ(SDEIER) != 0); 428631604222SAnusha Srivatsa I915_WRITE(SDEIER, 0xffffffff); 428731604222SAnusha Srivatsa POSTING_READ(SDEIER); 428831604222SAnusha Srivatsa 428931604222SAnusha Srivatsa gen3_assert_iir_is_zero(dev_priv, SDEIIR); 429031604222SAnusha Srivatsa I915_WRITE(SDEIMR, ~mask); 429131604222SAnusha Srivatsa 429231604222SAnusha Srivatsa icp_hpd_detection_setup(dev_priv); 429331604222SAnusha Srivatsa } 429431604222SAnusha Srivatsa 429551951ae7SMika Kuoppala static int gen11_irq_postinstall(struct drm_device *dev) 429651951ae7SMika Kuoppala { 429751951ae7SMika Kuoppala struct drm_i915_private *dev_priv = dev->dev_private; 4298df0d28c1SDhinakaran Pandiyan u32 gu_misc_masked = GEN11_GU_MISC_GSE; 429951951ae7SMika Kuoppala 430031604222SAnusha Srivatsa if (HAS_PCH_ICP(dev_priv)) 430131604222SAnusha Srivatsa icp_irq_postinstall(dev); 430231604222SAnusha Srivatsa 430351951ae7SMika Kuoppala gen11_gt_irq_postinstall(dev_priv); 430451951ae7SMika Kuoppala gen8_de_irq_postinstall(dev_priv); 430551951ae7SMika Kuoppala 4306df0d28c1SDhinakaran Pandiyan GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4307df0d28c1SDhinakaran Pandiyan 430851951ae7SMika Kuoppala I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 430951951ae7SMika Kuoppala 431051951ae7SMika Kuoppala I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 431151951ae7SMika Kuoppala POSTING_READ(GEN11_GFX_MSTR_IRQ); 431251951ae7SMika Kuoppala 431351951ae7SMika Kuoppala return 0; 431451951ae7SMika Kuoppala } 431551951ae7SMika Kuoppala 431643f328d7SVille Syrjälä static int cherryview_irq_postinstall(struct drm_device *dev) 431743f328d7SVille Syrjälä { 4318fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 431943f328d7SVille Syrjälä 432043f328d7SVille Syrjälä gen8_gt_irq_postinstall(dev_priv); 432143f328d7SVille Syrjälä 4322ad22d106SVille Syrjälä spin_lock_irq(&dev_priv->irq_lock); 43239918271eSVille Syrjälä if (dev_priv->display_irqs_enabled) 4324ad22d106SVille Syrjälä vlv_display_irq_postinstall(dev_priv); 4325ad22d106SVille Syrjälä spin_unlock_irq(&dev_priv->irq_lock); 4326ad22d106SVille Syrjälä 4327e5328c43SVille Syrjälä I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 432843f328d7SVille Syrjälä POSTING_READ(GEN8_MASTER_IRQ); 
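	/*
	 * MMIO writes are posted, so read the master IRQ register back to
	 * make sure the write enabling master interrupt control has reached
	 * the hardware before returning, as the other *_irq_postinstall()
	 * hooks do.
	 */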
432943f328d7SVille Syrjälä 433043f328d7SVille Syrjälä return 0; 433143f328d7SVille Syrjälä } 433243f328d7SVille Syrjälä 43336bcdb1c8SVille Syrjälä static void i8xx_irq_reset(struct drm_device *dev) 4334c2798b19SChris Wilson { 4335fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4336c2798b19SChris Wilson 433744d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 433844d9241eSVille Syrjälä 4339d420a50cSVille Syrjälä I915_WRITE16(HWSTAM, 0xffff); 4340d420a50cSVille Syrjälä 4341e9e9848aSVille Syrjälä GEN2_IRQ_RESET(); 4342c2798b19SChris Wilson } 4343c2798b19SChris Wilson 4344c2798b19SChris Wilson static int i8xx_irq_postinstall(struct drm_device *dev) 4345c2798b19SChris Wilson { 4346fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4347e9e9848aSVille Syrjälä u16 enable_mask; 4348c2798b19SChris Wilson 4349045cebd2SVille Syrjälä I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4350045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 4351c2798b19SChris Wilson 4352c2798b19SChris Wilson /* Unmask the interrupts that we always want on. */ 4353c2798b19SChris Wilson dev_priv->irq_mask = 4354c2798b19SChris Wilson ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 435516659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 435616659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4357c2798b19SChris Wilson 4358e9e9848aSVille Syrjälä enable_mask = 4359c2798b19SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4360c2798b19SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 436116659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4362e9e9848aSVille Syrjälä I915_USER_INTERRUPT; 4363e9e9848aSVille Syrjälä 4364e9e9848aSVille Syrjälä GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4365c2798b19SChris Wilson 4366379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4367379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. */ 4368d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4369755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4370755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4371d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4372379ef82dSDaniel Vetter 4373c2798b19SChris Wilson return 0; 4374c2798b19SChris Wilson } 4375c2798b19SChris Wilson 437678c357ddSVille Syrjälä static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 437778c357ddSVille Syrjälä u16 *eir, u16 *eir_stuck) 437878c357ddSVille Syrjälä { 437978c357ddSVille Syrjälä u16 emr; 438078c357ddSVille Syrjälä 438178c357ddSVille Syrjälä *eir = I915_READ16(EIR); 438278c357ddSVille Syrjälä 438378c357ddSVille Syrjälä if (*eir) 438478c357ddSVille Syrjälä I915_WRITE16(EIR, *eir); 438578c357ddSVille Syrjälä 438678c357ddSVille Syrjälä *eir_stuck = I915_READ16(EIR); 438778c357ddSVille Syrjälä if (*eir_stuck == 0) 438878c357ddSVille Syrjälä return; 438978c357ddSVille Syrjälä 439078c357ddSVille Syrjälä /* 439178c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 439278c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 439378c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 439478c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 439578c357ddSVille Syrjälä * is still pending. 
Also some EIR bits can't be 439678c357ddSVille Syrjälä * cleared except by handling the underlying error 439778c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 439878c357ddSVille Syrjälä * remains set. 439978c357ddSVille Syrjälä */ 440078c357ddSVille Syrjälä emr = I915_READ16(EMR); 440178c357ddSVille Syrjälä I915_WRITE16(EMR, 0xffff); 440278c357ddSVille Syrjälä I915_WRITE16(EMR, emr | *eir_stuck); 440378c357ddSVille Syrjälä } 440478c357ddSVille Syrjälä 440578c357ddSVille Syrjälä static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 440678c357ddSVille Syrjälä u16 eir, u16 eir_stuck) 440778c357ddSVille Syrjälä { 440878c357ddSVille Syrjälä DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 440978c357ddSVille Syrjälä 441078c357ddSVille Syrjälä if (eir_stuck) 441178c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 441278c357ddSVille Syrjälä } 441378c357ddSVille Syrjälä 441478c357ddSVille Syrjälä static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 441578c357ddSVille Syrjälä u32 *eir, u32 *eir_stuck) 441678c357ddSVille Syrjälä { 441778c357ddSVille Syrjälä u32 emr; 441878c357ddSVille Syrjälä 441978c357ddSVille Syrjälä *eir = I915_READ(EIR); 442078c357ddSVille Syrjälä 442178c357ddSVille Syrjälä I915_WRITE(EIR, *eir); 442278c357ddSVille Syrjälä 442378c357ddSVille Syrjälä *eir_stuck = I915_READ(EIR); 442478c357ddSVille Syrjälä if (*eir_stuck == 0) 442578c357ddSVille Syrjälä return; 442678c357ddSVille Syrjälä 442778c357ddSVille Syrjälä /* 442878c357ddSVille Syrjälä * Toggle all EMR bits to make sure we get an edge 442978c357ddSVille Syrjälä * in the ISR master error bit if we don't clear 443078c357ddSVille Syrjälä * all the EIR bits. Otherwise the edge triggered 443178c357ddSVille Syrjälä * IIR on i965/g4x wouldn't notice that an interrupt 443278c357ddSVille Syrjälä * is still pending. Also some EIR bits can't be 443378c357ddSVille Syrjälä * cleared except by handling the underlying error 443478c357ddSVille Syrjälä * (or by a GPU reset) so we mask any bit that 443578c357ddSVille Syrjälä * remains set. 
443678c357ddSVille Syrjälä */ 443778c357ddSVille Syrjälä emr = I915_READ(EMR); 443878c357ddSVille Syrjälä I915_WRITE(EMR, 0xffffffff); 443978c357ddSVille Syrjälä I915_WRITE(EMR, emr | *eir_stuck); 444078c357ddSVille Syrjälä } 444178c357ddSVille Syrjälä 444278c357ddSVille Syrjälä static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 444378c357ddSVille Syrjälä u32 eir, u32 eir_stuck) 444478c357ddSVille Syrjälä { 444578c357ddSVille Syrjälä DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 444678c357ddSVille Syrjälä 444778c357ddSVille Syrjälä if (eir_stuck) 444878c357ddSVille Syrjälä DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 444978c357ddSVille Syrjälä } 445078c357ddSVille Syrjälä 4451ff1f525eSDaniel Vetter static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4452c2798b19SChris Wilson { 445345a83f84SDaniel Vetter struct drm_device *dev = arg; 4454fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4455af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4456c2798b19SChris Wilson 44572dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 44582dd2a883SImre Deak return IRQ_NONE; 44592dd2a883SImre Deak 44601f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 44611f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 44621f814dacSImre Deak 4463af722d28SVille Syrjälä do { 4464af722d28SVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 446578c357ddSVille Syrjälä u16 eir = 0, eir_stuck = 0; 4466af722d28SVille Syrjälä u16 iir; 4467af722d28SVille Syrjälä 4468c2798b19SChris Wilson iir = I915_READ16(IIR); 4469c2798b19SChris Wilson if (iir == 0) 4470af722d28SVille Syrjälä break; 4471c2798b19SChris Wilson 4472af722d28SVille Syrjälä ret = IRQ_HANDLED; 4473c2798b19SChris Wilson 4474eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4475eb64343cSVille Syrjälä * signalled in iir */ 4476eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4477c2798b19SChris Wilson 447878c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 447978c357ddSVille Syrjälä i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 448078c357ddSVille Syrjälä 4481fd3a4024SDaniel Vetter I915_WRITE16(IIR, iir); 4482c2798b19SChris Wilson 4483c2798b19SChris Wilson if (iir & I915_USER_INTERRUPT) 44843b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4485c2798b19SChris Wilson 448678c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 448778c357ddSVille Syrjälä i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4488af722d28SVille Syrjälä 4489eb64343cSVille Syrjälä i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4490af722d28SVille Syrjälä } while (0); 4491c2798b19SChris Wilson 44921f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 44931f814dacSImre Deak 44941f814dacSImre Deak return ret; 4495c2798b19SChris Wilson } 4496c2798b19SChris Wilson 44976bcdb1c8SVille Syrjälä static void i915_irq_reset(struct drm_device *dev) 4498a266c7d5SChris Wilson { 4499fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4500a266c7d5SChris Wilson 450156b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 45020706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4503a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4504a266c7d5SChris Wilson } 4505a266c7d5SChris Wilson 450644d9241eSVille Syrjälä i9xx_pipestat_irq_reset(dev_priv); 450744d9241eSVille Syrjälä 4508d420a50cSVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 450944d9241eSVille Syrjälä 
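	/*
	 * Setting all bits in HWSTAM masks hardware status reporting while
	 * IMR/IER/IIR are torn down by GEN3_IRQ_RESET() below;
	 * i915_irq_postinstall() then unmasks only the sources it needs.
	 */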
4510ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4511a266c7d5SChris Wilson } 4512a266c7d5SChris Wilson 4513a266c7d5SChris Wilson static int i915_irq_postinstall(struct drm_device *dev) 4514a266c7d5SChris Wilson { 4515fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 451638bde180SChris Wilson u32 enable_mask; 4517a266c7d5SChris Wilson 4518045cebd2SVille Syrjälä I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4519045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH)); 452038bde180SChris Wilson 452138bde180SChris Wilson /* Unmask the interrupts that we always want on. */ 452238bde180SChris Wilson dev_priv->irq_mask = 452338bde180SChris Wilson ~(I915_ASLE_INTERRUPT | 452438bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 452516659bc5SVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 452616659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 452738bde180SChris Wilson 452838bde180SChris Wilson enable_mask = 452938bde180SChris Wilson I915_ASLE_INTERRUPT | 453038bde180SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 453138bde180SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 453216659bc5SVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 453338bde180SChris Wilson I915_USER_INTERRUPT; 453438bde180SChris Wilson 453556b857a5STvrtko Ursulin if (I915_HAS_HOTPLUG(dev_priv)) { 4536a266c7d5SChris Wilson /* Enable in IER... */ 4537a266c7d5SChris Wilson enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4538a266c7d5SChris Wilson /* and unmask in IMR */ 4539a266c7d5SChris Wilson dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4540a266c7d5SChris Wilson } 4541a266c7d5SChris Wilson 4542ba7eb789SVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4543a266c7d5SChris Wilson 4544379ef82dSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4545379ef82dSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4546d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4547755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4548755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4549d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4550379ef82dSDaniel Vetter 4551c30bb1fdSVille Syrjälä i915_enable_asle_pipestat(dev_priv); 4552c30bb1fdSVille Syrjälä 455320afbda2SDaniel Vetter return 0; 455420afbda2SDaniel Vetter } 455520afbda2SDaniel Vetter 4556ff1f525eSDaniel Vetter static irqreturn_t i915_irq_handler(int irq, void *arg) 4557a266c7d5SChris Wilson { 455845a83f84SDaniel Vetter struct drm_device *dev = arg; 4559fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4560af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4561a266c7d5SChris Wilson 45622dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 45632dd2a883SImre Deak return IRQ_NONE; 45642dd2a883SImre Deak 45651f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 45661f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 45671f814dacSImre Deak 456838bde180SChris Wilson do { 4569eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 457078c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4571af722d28SVille Syrjälä u32 hotplug_status = 0; 4572af722d28SVille Syrjälä u32 iir; 4573a266c7d5SChris Wilson 4574af722d28SVille Syrjälä iir = I915_READ(IIR); 4575af722d28SVille Syrjälä if (iir == 0) 4576af722d28SVille Syrjälä break; 4577af722d28SVille Syrjälä 4578af722d28SVille Syrjälä ret = IRQ_HANDLED; 4579af722d28SVille Syrjälä 4580af722d28SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv) && 4581af722d28SVille Syrjälä iir & I915_DISPLAY_PORT_INTERRUPT) 4582af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4583a266c7d5SChris Wilson 4584eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4585eb64343cSVille Syrjälä * signalled in iir */ 4586eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4587a266c7d5SChris Wilson 458878c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 458978c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 459078c357ddSVille Syrjälä 4591fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4592a266c7d5SChris Wilson 4593a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 45943b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4595a266c7d5SChris Wilson 459678c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 459778c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4598a266c7d5SChris Wilson 4599af722d28SVille Syrjälä if (hotplug_status) 4600af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4601af722d28SVille Syrjälä 4602af722d28SVille Syrjälä i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4603af722d28SVille Syrjälä } while (0); 4604a266c7d5SChris Wilson 46051f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 46061f814dacSImre Deak 4607a266c7d5SChris Wilson return ret; 4608a266c7d5SChris Wilson } 4609a266c7d5SChris Wilson 46106bcdb1c8SVille Syrjälä static void i965_irq_reset(struct drm_device *dev) 4611a266c7d5SChris Wilson { 4612fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4613a266c7d5SChris Wilson 46140706f17cSEgbert Eich i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4615a266c7d5SChris Wilson I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4616a266c7d5SChris Wilson 461744d9241eSVille Syrjälä 
i9xx_pipestat_irq_reset(dev_priv); 461844d9241eSVille Syrjälä 4619d420a50cSVille Syrjälä I915_WRITE(HWSTAM, 0xffffffff); 462044d9241eSVille Syrjälä 4621ba7eb789SVille Syrjälä GEN3_IRQ_RESET(); 4622a266c7d5SChris Wilson } 4623a266c7d5SChris Wilson 4624a266c7d5SChris Wilson static int i965_irq_postinstall(struct drm_device *dev) 4625a266c7d5SChris Wilson { 4626fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4627bbba0a97SChris Wilson u32 enable_mask; 4628a266c7d5SChris Wilson u32 error_mask; 4629a266c7d5SChris Wilson 4630045cebd2SVille Syrjälä /* 4631045cebd2SVille Syrjälä * Enable some error detection, note the instruction error mask 4632045cebd2SVille Syrjälä * bit is reserved, so we leave it masked. 4633045cebd2SVille Syrjälä */ 4634045cebd2SVille Syrjälä if (IS_G4X(dev_priv)) { 4635045cebd2SVille Syrjälä error_mask = ~(GM45_ERROR_PAGE_TABLE | 4636045cebd2SVille Syrjälä GM45_ERROR_MEM_PRIV | 4637045cebd2SVille Syrjälä GM45_ERROR_CP_PRIV | 4638045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4639045cebd2SVille Syrjälä } else { 4640045cebd2SVille Syrjälä error_mask = ~(I915_ERROR_PAGE_TABLE | 4641045cebd2SVille Syrjälä I915_ERROR_MEMORY_REFRESH); 4642045cebd2SVille Syrjälä } 4643045cebd2SVille Syrjälä I915_WRITE(EMR, error_mask); 4644045cebd2SVille Syrjälä 4645a266c7d5SChris Wilson /* Unmask the interrupts that we always want on. */ 4646c30bb1fdSVille Syrjälä dev_priv->irq_mask = 4647c30bb1fdSVille Syrjälä ~(I915_ASLE_INTERRUPT | 4648adca4730SChris Wilson I915_DISPLAY_PORT_INTERRUPT | 4649bbba0a97SChris Wilson I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4650bbba0a97SChris Wilson I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 465178c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT); 4652bbba0a97SChris Wilson 4653c30bb1fdSVille Syrjälä enable_mask = 4654c30bb1fdSVille Syrjälä I915_ASLE_INTERRUPT | 4655c30bb1fdSVille Syrjälä I915_DISPLAY_PORT_INTERRUPT | 4656c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4657c30bb1fdSVille Syrjälä I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 465878c357ddSVille Syrjälä I915_MASTER_ERROR_INTERRUPT | 4659c30bb1fdSVille Syrjälä I915_USER_INTERRUPT; 4660bbba0a97SChris Wilson 466191d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4662bbba0a97SChris Wilson enable_mask |= I915_BSD_USER_INTERRUPT; 4663a266c7d5SChris Wilson 4664c30bb1fdSVille Syrjälä GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4665c30bb1fdSVille Syrjälä 4666b79480baSDaniel Vetter /* Interrupt setup is already guaranteed to be single-threaded, this is 4667b79480baSDaniel Vetter * just to make the assert_spin_locked check happy. 
*/ 4668d6207435SDaniel Vetter spin_lock_irq(&dev_priv->irq_lock); 4669755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4670755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4671755e9019SImre Deak i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4672d6207435SDaniel Vetter spin_unlock_irq(&dev_priv->irq_lock); 4673a266c7d5SChris Wilson 467491d14251STvrtko Ursulin i915_enable_asle_pipestat(dev_priv); 467520afbda2SDaniel Vetter 467620afbda2SDaniel Vetter return 0; 467720afbda2SDaniel Vetter } 467820afbda2SDaniel Vetter 467991d14251STvrtko Ursulin static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 468020afbda2SDaniel Vetter { 468120afbda2SDaniel Vetter u32 hotplug_en; 468220afbda2SDaniel Vetter 468367520415SChris Wilson lockdep_assert_held(&dev_priv->irq_lock); 4684b5ea2d56SDaniel Vetter 4685adca4730SChris Wilson /* Note HDMI and DP share hotplug bits */ 4686e5868a31SEgbert Eich /* enable bits are the same for all generations */ 468791d14251STvrtko Ursulin hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4688a266c7d5SChris Wilson /* Programming the CRT detection parameters tends 4689a266c7d5SChris Wilson to generate a spurious hotplug event about three 4690a266c7d5SChris Wilson seconds later. So just do it once. 4691a266c7d5SChris Wilson */ 469291d14251STvrtko Ursulin if (IS_G4X(dev_priv)) 4693a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4694a266c7d5SChris Wilson hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4695a266c7d5SChris Wilson 4696a266c7d5SChris Wilson /* Ignore TV since it's buggy */ 46970706f17cSEgbert Eich i915_hotplug_interrupt_update_locked(dev_priv, 4698f9e3dc78SJani Nikula HOTPLUG_INT_EN_MASK | 4699f9e3dc78SJani Nikula CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4700f9e3dc78SJani Nikula CRT_HOTPLUG_ACTIVATION_PERIOD_64, 47010706f17cSEgbert Eich hotplug_en); 4702a266c7d5SChris Wilson } 4703a266c7d5SChris Wilson 4704ff1f525eSDaniel Vetter static irqreturn_t i965_irq_handler(int irq, void *arg) 4705a266c7d5SChris Wilson { 470645a83f84SDaniel Vetter struct drm_device *dev = arg; 4707fac5e23eSChris Wilson struct drm_i915_private *dev_priv = to_i915(dev); 4708af722d28SVille Syrjälä irqreturn_t ret = IRQ_NONE; 4709a266c7d5SChris Wilson 47102dd2a883SImre Deak if (!intel_irqs_enabled(dev_priv)) 47112dd2a883SImre Deak return IRQ_NONE; 47122dd2a883SImre Deak 47131f814dacSImre Deak /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 47141f814dacSImre Deak disable_rpm_wakeref_asserts(dev_priv); 47151f814dacSImre Deak 4716af722d28SVille Syrjälä do { 4717eb64343cSVille Syrjälä u32 pipe_stats[I915_MAX_PIPES] = {}; 471878c357ddSVille Syrjälä u32 eir = 0, eir_stuck = 0; 4719af722d28SVille Syrjälä u32 hotplug_status = 0; 4720af722d28SVille Syrjälä u32 iir; 47212c8ba29fSChris Wilson 4722af722d28SVille Syrjälä iir = I915_READ(IIR); 4723af722d28SVille Syrjälä if (iir == 0) 4724af722d28SVille Syrjälä break; 4725af722d28SVille Syrjälä 4726af722d28SVille Syrjälä ret = IRQ_HANDLED; 4727af722d28SVille Syrjälä 4728af722d28SVille Syrjälä if (iir & I915_DISPLAY_PORT_INTERRUPT) 4729af722d28SVille Syrjälä hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4730a266c7d5SChris Wilson 4731eb64343cSVille Syrjälä /* Call regardless, as some status bits might not be 4732eb64343cSVille Syrjälä * signalled in iir */ 4733eb64343cSVille Syrjälä i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4734a266c7d5SChris Wilson 473578c357ddSVille Syrjälä if (iir & 
I915_MASTER_ERROR_INTERRUPT) 473678c357ddSVille Syrjälä i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 473778c357ddSVille Syrjälä 4738fd3a4024SDaniel Vetter I915_WRITE(IIR, iir); 4739a266c7d5SChris Wilson 4740a266c7d5SChris Wilson if (iir & I915_USER_INTERRUPT) 47413b3f1650SAkash Goel notify_ring(dev_priv->engine[RCS]); 4742af722d28SVille Syrjälä 4743a266c7d5SChris Wilson if (iir & I915_BSD_USER_INTERRUPT) 47443b3f1650SAkash Goel notify_ring(dev_priv->engine[VCS]); 4745a266c7d5SChris Wilson 474678c357ddSVille Syrjälä if (iir & I915_MASTER_ERROR_INTERRUPT) 474778c357ddSVille Syrjälä i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4748515ac2bbSDaniel Vetter 4749af722d28SVille Syrjälä if (hotplug_status) 4750af722d28SVille Syrjälä i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4751af722d28SVille Syrjälä 4752af722d28SVille Syrjälä i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4753af722d28SVille Syrjälä } while (0); 4754a266c7d5SChris Wilson 47551f814dacSImre Deak enable_rpm_wakeref_asserts(dev_priv); 47561f814dacSImre Deak 4757a266c7d5SChris Wilson return ret; 4758a266c7d5SChris Wilson } 4759a266c7d5SChris Wilson 4760fca52a55SDaniel Vetter /** 4761fca52a55SDaniel Vetter * intel_irq_init - initializes irq support 4762fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4763fca52a55SDaniel Vetter * 4764fca52a55SDaniel Vetter * This function initializes all the irq support including work items, timers 4765fca52a55SDaniel Vetter * and all the vtables. It does not setup the interrupt itself though. 4766fca52a55SDaniel Vetter */ 4767b963291cSDaniel Vetter void intel_irq_init(struct drm_i915_private *dev_priv) 4768f71d4af4SJesse Barnes { 476991c8a326SChris Wilson struct drm_device *dev = &dev_priv->drm; 4770562d9baeSSagar Arun Kamble struct intel_rps *rps = &dev_priv->gt_pm.rps; 4771cefcff8fSJoonas Lahtinen int i; 47728b2e326dSChris Wilson 477377913b39SJani Nikula intel_hpd_init_work(dev_priv); 477477913b39SJani Nikula 4775562d9baeSSagar Arun Kamble INIT_WORK(&rps->work, gen6_pm_rps_work); 4776cefcff8fSJoonas Lahtinen 4777a4da4fa4SDaniel Vetter INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4778cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4779cefcff8fSJoonas Lahtinen dev_priv->l3_parity.remap_info[i] = NULL; 47808b2e326dSChris Wilson 47814805fe82STvrtko Ursulin if (HAS_GUC_SCHED(dev_priv)) 478226705e20SSagar Arun Kamble dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 478326705e20SSagar Arun Kamble 4784a6706b45SDeepak S /* Let's track the enabled rps events */ 4785666a4537SWayne Boyer if (IS_VALLEYVIEW(dev_priv)) 47866c65a587SVille Syrjälä /* WaGsvRC0ResidencyMethod:vlv */ 4787e0e8c7cbSChris Wilson dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 478831685c25SDeepak S else 4789a6706b45SDeepak S dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4790a6706b45SDeepak S 4791562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz = 0; 47921800ad25SSagar Arun Kamble 47931800ad25SSagar Arun Kamble /* 4794acf2dc22SMika Kuoppala * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer 47951800ad25SSagar Arun Kamble * if GEN6_PM_UP_EI_EXPIRED is masked. 47961800ad25SSagar Arun Kamble * 47971800ad25SSagar Arun Kamble * TODO: verify if this can be reproduced on VLV,CHV. 
47981800ad25SSagar Arun Kamble */
4799bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) <= 7)
4800562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
48011800ad25SSagar Arun Kamble 
4802bca2bf2aSPandiyan, Dhinakaran if (INTEL_GEN(dev_priv) >= 8)
4803562d9baeSSagar Arun Kamble rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
48041800ad25SSagar Arun Kamble 
4805b963291cSDaniel Vetter if (IS_GEN2(dev_priv)) {
48064194c088SRodrigo Vivi /* Gen2 doesn't have a hardware frame counter */
48074cdb83ecSVille Syrjälä dev->max_vblank_count = 0;
4808bca2bf2aSPandiyan, Dhinakaran } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
4809f71d4af4SJesse Barnes dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4810fd8f507cSVille Syrjälä dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4811391f75e2SVille Syrjälä } else {
4812391f75e2SVille Syrjälä dev->driver->get_vblank_counter = i915_get_vblank_counter;
4813391f75e2SVille Syrjälä dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4814f71d4af4SJesse Barnes }
4815f71d4af4SJesse Barnes 
481621da2700SVille Syrjälä /*
481721da2700SVille Syrjälä * Opt out of the vblank disable timer on everything except gen2.
481821da2700SVille Syrjälä * Gen2 doesn't have a hardware frame counter and so depends on
481921da2700SVille Syrjälä * vblank interrupts to produce sane vblank sequence numbers.
482021da2700SVille Syrjälä */
4821b963291cSDaniel Vetter if (!IS_GEN2(dev_priv))
482221da2700SVille Syrjälä dev->vblank_disable_immediate = true;
482321da2700SVille Syrjälä 
4824262fd485SChris Wilson /* Most platforms treat the display irq block as an always-on
4825262fd485SChris Wilson * power domain. vlv/chv can disable it at runtime and need
4826262fd485SChris Wilson * special care to avoid writing any of the display block registers
4827262fd485SChris Wilson * outside of the power domain. We defer setting up the display irqs
4828262fd485SChris Wilson * in this case to the runtime pm.
4829262fd485SChris Wilson */ 4830262fd485SChris Wilson dev_priv->display_irqs_enabled = true; 4831262fd485SChris Wilson if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4832262fd485SChris Wilson dev_priv->display_irqs_enabled = false; 4833262fd485SChris Wilson 4834317eaa95SLyude dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4835317eaa95SLyude 48361bf6ad62SDaniel Vetter dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4837f71d4af4SJesse Barnes dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4838f71d4af4SJesse Barnes 4839b963291cSDaniel Vetter if (IS_CHERRYVIEW(dev_priv)) { 484043f328d7SVille Syrjälä dev->driver->irq_handler = cherryview_irq_handler; 48416bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = cherryview_irq_reset; 484243f328d7SVille Syrjälä dev->driver->irq_postinstall = cherryview_irq_postinstall; 48436bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = cherryview_irq_reset; 484486e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 484586e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 484643f328d7SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4847b963291cSDaniel Vetter } else if (IS_VALLEYVIEW(dev_priv)) { 48487e231dbeSJesse Barnes dev->driver->irq_handler = valleyview_irq_handler; 48496bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = valleyview_irq_reset; 48507e231dbeSJesse Barnes dev->driver->irq_postinstall = valleyview_irq_postinstall; 48516bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = valleyview_irq_reset; 485286e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 485386e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4854fa00abe0SEgbert Eich dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 485551951ae7SMika Kuoppala } else if (INTEL_GEN(dev_priv) >= 11) { 485651951ae7SMika Kuoppala dev->driver->irq_handler = gen11_irq_handler; 485751951ae7SMika Kuoppala dev->driver->irq_preinstall = gen11_irq_reset; 485851951ae7SMika Kuoppala dev->driver->irq_postinstall = gen11_irq_postinstall; 485951951ae7SMika Kuoppala dev->driver->irq_uninstall = gen11_irq_reset; 486051951ae7SMika Kuoppala dev->driver->enable_vblank = gen8_enable_vblank; 486151951ae7SMika Kuoppala dev->driver->disable_vblank = gen8_disable_vblank; 4862121e758eSDhinakaran Pandiyan dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4863bca2bf2aSPandiyan, Dhinakaran } else if (INTEL_GEN(dev_priv) >= 8) { 4864abd58f01SBen Widawsky dev->driver->irq_handler = gen8_irq_handler; 4865723761b8SDaniel Vetter dev->driver->irq_preinstall = gen8_irq_reset; 4866abd58f01SBen Widawsky dev->driver->irq_postinstall = gen8_irq_postinstall; 48676bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = gen8_irq_reset; 4868abd58f01SBen Widawsky dev->driver->enable_vblank = gen8_enable_vblank; 4869abd58f01SBen Widawsky dev->driver->disable_vblank = gen8_disable_vblank; 4870cc3f90f0SAnder Conselvan de Oliveira if (IS_GEN9_LP(dev_priv)) 4871e0a20ad7SShashank Sharma dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 48727b22b8c4SRodrigo Vivi else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 48737b22b8c4SRodrigo Vivi HAS_PCH_CNP(dev_priv)) 48746dbf30ceSVille Syrjälä dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 48756dbf30ceSVille Syrjälä else 48763a3b3c7dSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 48776e266956STvrtko Ursulin } else if (HAS_PCH_SPLIT(dev_priv)) { 4878f71d4af4SJesse Barnes dev->driver->irq_handler = 
ironlake_irq_handler; 4879723761b8SDaniel Vetter dev->driver->irq_preinstall = ironlake_irq_reset; 4880f71d4af4SJesse Barnes dev->driver->irq_postinstall = ironlake_irq_postinstall; 48816bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = ironlake_irq_reset; 4882f71d4af4SJesse Barnes dev->driver->enable_vblank = ironlake_enable_vblank; 4883f71d4af4SJesse Barnes dev->driver->disable_vblank = ironlake_disable_vblank; 4884e4ce95aaSVille Syrjälä dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4885f71d4af4SJesse Barnes } else { 48867e22dbbbSTvrtko Ursulin if (IS_GEN2(dev_priv)) { 48876bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i8xx_irq_reset; 4888c2798b19SChris Wilson dev->driver->irq_postinstall = i8xx_irq_postinstall; 4889c2798b19SChris Wilson dev->driver->irq_handler = i8xx_irq_handler; 48906bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i8xx_irq_reset; 489186e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 489286e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 48937e22dbbbSTvrtko Ursulin } else if (IS_GEN3(dev_priv)) { 48946bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i915_irq_reset; 4895a266c7d5SChris Wilson dev->driver->irq_postinstall = i915_irq_postinstall; 48966bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i915_irq_reset; 4897a266c7d5SChris Wilson dev->driver->irq_handler = i915_irq_handler; 489886e83e35SChris Wilson dev->driver->enable_vblank = i8xx_enable_vblank; 489986e83e35SChris Wilson dev->driver->disable_vblank = i8xx_disable_vblank; 4900c2798b19SChris Wilson } else { 49016bcdb1c8SVille Syrjälä dev->driver->irq_preinstall = i965_irq_reset; 4902a266c7d5SChris Wilson dev->driver->irq_postinstall = i965_irq_postinstall; 49036bcdb1c8SVille Syrjälä dev->driver->irq_uninstall = i965_irq_reset; 4904a266c7d5SChris Wilson dev->driver->irq_handler = i965_irq_handler; 490586e83e35SChris Wilson dev->driver->enable_vblank = i965_enable_vblank; 490686e83e35SChris Wilson dev->driver->disable_vblank = i965_disable_vblank; 4907c2798b19SChris Wilson } 4908778eb334SVille Syrjälä if (I915_HAS_HOTPLUG(dev_priv)) 4909778eb334SVille Syrjälä dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4910f71d4af4SJesse Barnes } 4911f71d4af4SJesse Barnes } 491220afbda2SDaniel Vetter 4913fca52a55SDaniel Vetter /** 4914cefcff8fSJoonas Lahtinen * intel_irq_fini - deinitializes IRQ support 4915cefcff8fSJoonas Lahtinen * @i915: i915 device instance 4916cefcff8fSJoonas Lahtinen * 4917cefcff8fSJoonas Lahtinen * This function deinitializes all the IRQ support. 4918cefcff8fSJoonas Lahtinen */ 4919cefcff8fSJoonas Lahtinen void intel_irq_fini(struct drm_i915_private *i915) 4920cefcff8fSJoonas Lahtinen { 4921cefcff8fSJoonas Lahtinen int i; 4922cefcff8fSJoonas Lahtinen 4923cefcff8fSJoonas Lahtinen for (i = 0; i < MAX_L3_SLICES; ++i) 4924cefcff8fSJoonas Lahtinen kfree(i915->l3_parity.remap_info[i]); 4925cefcff8fSJoonas Lahtinen } 4926cefcff8fSJoonas Lahtinen 4927cefcff8fSJoonas Lahtinen /** 4928fca52a55SDaniel Vetter * intel_irq_install - enables the hardware interrupt 4929fca52a55SDaniel Vetter * @dev_priv: i915 device instance 4930fca52a55SDaniel Vetter * 4931fca52a55SDaniel Vetter * This function enables the hardware interrupt handling, but leaves the hotplug 4932fca52a55SDaniel Vetter * handling still disabled. It is called after intel_irq_init(). 
4933fca52a55SDaniel Vetter *
4934fca52a55SDaniel Vetter * In the driver load and resume code we need working interrupts in a few places
4935fca52a55SDaniel Vetter * but don't want to deal with the hassle of concurrent probe and hotplug
4936fca52a55SDaniel Vetter * workers. Hence the split into this two-stage approach.
4937fca52a55SDaniel Vetter */
49382aeb7d3aSDaniel Vetter int intel_irq_install(struct drm_i915_private *dev_priv)
49392aeb7d3aSDaniel Vetter {
49402aeb7d3aSDaniel Vetter /*
49412aeb7d3aSDaniel Vetter * We enable some interrupt sources in our postinstall hooks, so mark
49422aeb7d3aSDaniel Vetter * interrupts as enabled _before_ actually enabling them to avoid
49432aeb7d3aSDaniel Vetter * special cases in our ordering checks.
49442aeb7d3aSDaniel Vetter */
4945ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true;
49462aeb7d3aSDaniel Vetter 
494791c8a326SChris Wilson return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
49482aeb7d3aSDaniel Vetter }
49492aeb7d3aSDaniel Vetter 
4950fca52a55SDaniel Vetter /**
4951fca52a55SDaniel Vetter * intel_irq_uninstall - finalizes all irq handling
4952fca52a55SDaniel Vetter * @dev_priv: i915 device instance
4953fca52a55SDaniel Vetter *
4954fca52a55SDaniel Vetter * This stops interrupt and hotplug handling and unregisters and frees all
4955fca52a55SDaniel Vetter * resources acquired in the init functions.
4956fca52a55SDaniel Vetter */
49572aeb7d3aSDaniel Vetter void intel_irq_uninstall(struct drm_i915_private *dev_priv)
49582aeb7d3aSDaniel Vetter {
495991c8a326SChris Wilson drm_irq_uninstall(&dev_priv->drm);
49602aeb7d3aSDaniel Vetter intel_hpd_cancel_work(dev_priv);
4961ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false;
49622aeb7d3aSDaniel Vetter }
49632aeb7d3aSDaniel Vetter 
4964fca52a55SDaniel Vetter /**
4965fca52a55SDaniel Vetter * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4966fca52a55SDaniel Vetter * @dev_priv: i915 device instance
4967fca52a55SDaniel Vetter *
4968fca52a55SDaniel Vetter * This function is used to disable interrupts at runtime, both in the runtime
4969fca52a55SDaniel Vetter * pm and the system suspend/resume code.
4970fca52a55SDaniel Vetter */
4971b963291cSDaniel Vetter void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4972c67a470bSPaulo Zanoni {
497391c8a326SChris Wilson dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4974ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = false;
497591c8a326SChris Wilson synchronize_irq(dev_priv->drm.irq);
4976c67a470bSPaulo Zanoni }
4977c67a470bSPaulo Zanoni 
4978fca52a55SDaniel Vetter /**
4979fca52a55SDaniel Vetter * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4980fca52a55SDaniel Vetter * @dev_priv: i915 device instance
4981fca52a55SDaniel Vetter *
4982fca52a55SDaniel Vetter * This function is used to enable interrupts at runtime, both in the runtime
4983fca52a55SDaniel Vetter * pm and the system suspend/resume code.
4984fca52a55SDaniel Vetter */
4985b963291cSDaniel Vetter void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4986c67a470bSPaulo Zanoni {
4987ad1443f0SSagar Arun Kamble dev_priv->runtime_pm.irqs_enabled = true;
498891c8a326SChris Wilson dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
498991c8a326SChris Wilson dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4990c67a470bSPaulo Zanoni }
4991
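/*
 * Illustrative sketch (not part of the i915 sources): how a caller would
 * typically order the two-stage setup and the runtime pm helpers documented
 * above. The wrapper names example_irq_load() and example_irq_runtime_cycle()
 * are hypothetical; only the intel_irq_*() and
 * intel_runtime_pm_*_interrupts() helpers defined in this file are real.
 */
static int example_irq_load(struct drm_i915_private *dev_priv)
{
	/* Stage 1: work items, timers and vtables only, no IRQ requested yet. */
	intel_irq_init(dev_priv);

	/* Stage 2: request the interrupt line and run the postinstall hook. */
	return intel_irq_install(dev_priv);
}

static void example_irq_runtime_cycle(struct drm_i915_private *dev_priv)
{
	/*
	 * Runtime suspend quiesces interrupts via the irq_uninstall hook plus
	 * a synchronize_irq(); runtime resume re-runs preinstall/postinstall,
	 * using the vtable entries chosen in intel_irq_init().
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);
}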