1c0e09200SDave Airlie /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2c0e09200SDave Airlie */ 3c0e09200SDave Airlie /* 4c0e09200SDave Airlie * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5c0e09200SDave Airlie * All Rights Reserved. 6c0e09200SDave Airlie * 7c0e09200SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining a 8c0e09200SDave Airlie * copy of this software and associated documentation files (the 9c0e09200SDave Airlie * "Software"), to deal in the Software without restriction, including 10c0e09200SDave Airlie * without limitation the rights to use, copy, modify, merge, publish, 11c0e09200SDave Airlie * distribute, sub license, and/or sell copies of the Software, and to 12c0e09200SDave Airlie * permit persons to whom the Software is furnished to do so, subject to 13c0e09200SDave Airlie * the following conditions: 14c0e09200SDave Airlie * 15c0e09200SDave Airlie * The above copyright notice and this permission notice (including the 16c0e09200SDave Airlie * next paragraph) shall be included in all copies or substantial portions 17c0e09200SDave Airlie * of the Software. 18c0e09200SDave Airlie * 19c0e09200SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20c0e09200SDave Airlie * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21c0e09200SDave Airlie * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22c0e09200SDave Airlie * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23c0e09200SDave Airlie * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24c0e09200SDave Airlie * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25c0e09200SDave Airlie * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
26c0e09200SDave Airlie * 27c0e09200SDave Airlie */ 28c0e09200SDave Airlie 29a70491ccSJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30a70491ccSJoe Perches 3155367a27SJani Nikula #include <linux/slab.h> 3255367a27SJani Nikula #include <linux/sysrq.h> 3355367a27SJani Nikula 34fcd70cd3SDaniel Vetter #include <drm/drm_drv.h> 3555367a27SJani Nikula 362b874a02SJani Nikula #include "display/intel_display_irq.h" 371d455f8dSJani Nikula #include "display/intel_display_types.h" 38df0566a6SJani Nikula #include "display/intel_hotplug.h" 39da38ba98SJani Nikula #include "display/intel_hotplug_irq.h" 40df0566a6SJani Nikula #include "display/intel_lpe_audio.h" 417f6947fdSJani Nikula #include "display/intel_psr_regs.h" 42df0566a6SJani Nikula 43b3786b29SChris Wilson #include "gt/intel_breadcrumbs.h" 442239e6dfSDaniele Ceraolo Spurio #include "gt/intel_gt.h" 45cf1c97dcSAndi Shyti #include "gt/intel_gt_irq.h" 46d762043fSAndi Shyti #include "gt/intel_gt_pm_irq.h" 470d6419e9SMatt Roper #include "gt/intel_gt_regs.h" 483e7abf81SAndi Shyti #include "gt/intel_rps.h" 492239e6dfSDaniele Ceraolo Spurio 5024524e3fSJani Nikula #include "i915_driver.h" 51c0e09200SDave Airlie #include "i915_drv.h" 52440e2b3dSJani Nikula #include "i915_irq.h" 53476f62b8SJani Nikula #include "i915_reg.h" 54c0e09200SDave Airlie 55fca52a55SDaniel Vetter /** 56fca52a55SDaniel Vetter * DOC: interrupt handling 57fca52a55SDaniel Vetter * 58fca52a55SDaniel Vetter * These functions provide the basic support for enabling and disabling the 59fca52a55SDaniel Vetter * interrupt handling support. There's a lot more functionality in i915_irq.c 60fca52a55SDaniel Vetter * and related files, but that will be described in separate chapters. 61fca52a55SDaniel Vetter */ 62fca52a55SDaniel Vetter 639c6508b9SThomas Gleixner /* 649c6508b9SThomas Gleixner * Interrupt statistic for PMU. 
 * Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	/* Count only interrupts this driver actually handled. */
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}

/*
 * Reset a GEN3+ style IMR/IER/IIR interrupt register triple:
 * mask everything, disable all enables, then clear any latched events.
 */
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	/* Mask all interrupts and flush the write before touching IER/IIR. */
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

/* 16-bit GEN2 flavour of gen3_irq_reset(). */
static void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	/*
	 * Complain, then try hard to clear the stuck bits — written twice
	 * since IIR can queue up two events.
	 */
	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

/* 16-bit GEN2 flavour of gen3_assert_iir_is_zero(). */
static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * Program a GEN3+ IMR/IER pair, after checking that IIR holds no
 * stale events from before the reset.
 */
void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

/* 16-bit GEN2 flavour of gen3_irq_init(). */
static void gen2_irq_init(struct intel_uncore *uncore,
			  u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
				     GEN7_DOP_CLOCK_GATE_ENABLE, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	/* Drain every slice that has a pending parity error recorded. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-arm detection for this slice. */
		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		/* Notify userspace via uevent so it can remap the bad row. */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		drm_dbg(&dev_priv->drm,
			"Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	/* Restore DOP clock gating to its previous state. */
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

/* Top-half interrupt handler for Valleyview (GTIIR/GEN6_PMIIR/VLV_IIR). */
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 iir, gt_iir, pm_iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
		pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		if (gt_iir)
			intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
		if (pm_iir)
			intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Re-enable master interrupt and the saved VLV_IER value. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

		if (gt_iir)
			gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/* Top-half interrupt handler for Cherryview (GEN8_MASTER_IRQ/VLV_IIR). */
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 master_ctl, iir;
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 ier = 0;

		master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		/*
		 * Theory on interrupt generation, based on empirical evidence:
		 *
		 * x = ((VLV_IIR & VLV_IER) ||
		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
		 *
		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
		 * guarantee the CPU interrupt will be raised again even if we
		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
		 * bits this time around.
		 */
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
		ier = intel_uncore_rmw(&dev_priv->uncore, VLV_IER, ~0, 0);

		gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
			   I915_LPE_PIPE_B_INTERRUPT |
			   I915_LPE_PIPE_C_INTERRUPT))
			intel_lpe_audio_irq_handler(dev_priv);

		/*
		 * VLV_IIR is single buffered, and reflects the level
		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
		 */
		if (iir)
			intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);

		/* Restore VLV_IER and re-enable the master interrupt. */
		intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
		intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ilk_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	if (unlikely(!intel_irqs_enabled(i915)))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	/* disable master interrupt before clearing iir  */
	de_ier = raw_reg_read(regs, DEIER);
	raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(i915)) {
		sde_ier = raw_reg_read(regs, SDEIER);
		raw_reg_write(regs, SDEIER, 0);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = raw_reg_read(regs, GTIIR);
	if (gt_iir) {
		raw_reg_write(regs, GTIIR, gt_iir);
		if (GRAPHICS_VER(i915) >= 6)
			gen6_gt_irq_handler(to_gt(i915), gt_iir);
		else
			gen5_gt_irq_handler(to_gt(i915), gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = raw_reg_read(regs, DEIIR);
	if (de_iir) {
		raw_reg_write(regs, DEIIR, de_iir);
		if (DISPLAY_VER(i915) >= 7)
			ivb_display_irq_handler(i915, de_iir);
		else
			ilk_display_irq_handler(i915, de_iir);
		ret = IRQ_HANDLED;
	}

	if (GRAPHICS_VER(i915) >= 6) {
		u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
		if (pm_iir) {
			raw_reg_write(regs, GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
			ret = IRQ_HANDLED;
		}
	}

	/* Restore the master and (if saved) south interrupt enables. */
	raw_reg_write(regs, DEIER, de_ier);
	if (sde_ier)
		raw_reg_write(regs, SDEIER, sde_ier);

	pmu_irq_stats(i915, ret);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	enable_rpm_wakeref_asserts(&i915->runtime_pm);

	return ret;
}

static inline u32 gen8_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN8_MASTER_IRQ);
}

static inline void gen8_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
}

/* Top-half interrupt handler for GEN8+ (GEN8_MASTER_IRQ based). */
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore);
	u32 master_ctl;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	master_ctl = gen8_master_intr_disable(regs);
	if (!master_ctl) {
		gen8_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & ~GEN8_GT_IRQS) {
		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
		gen8_de_irq_handler(dev_priv, master_ctl);
		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	}

	gen8_master_intr_enable(regs);

	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 gen11_master_intr_disable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);

	/*
	 * Now with master disabled, get a sample of level indications
	 * for this interrupt. Indications will be cleared on related acks.
	 * New indications can and will light up during processing,
	 * and will generate new interrupt after enabling master.
	 */
	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
}

static inline void gen11_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
}

/* Top-half interrupt handler for GEN11+ (GEN11_GFX_MSTR_IRQ based). */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	struct intel_gt *gt = to_gt(i915);
	u32 master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_ctl = gen11_master_intr_disable(regs);
	if (!master_ctl) {
		gen11_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* Find, queue (onto bottom-halves), then clear each source */
	gen11_gt_irq_handler(gt, master_ctl);

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	gen11_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

static inline u32 dg1_master_intr_disable(void __iomem * const regs)
{
	u32 val;

	/* First disable interrupts */
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);

	/* Get the indication levels and ack the master unit */
	val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
	if (unlikely(!val))
		return 0;

	raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);

	return val;
}

static inline void dg1_master_intr_enable(void __iomem * const regs)
{
	raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
}

/* Top-half interrupt handler for DG1 (tile-level DG1_MSTR_TILE_INTR). */
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
	struct drm_i915_private * const i915 = arg;
	struct intel_gt *gt = to_gt(i915);
	void __iomem * const regs = intel_uncore_regs(gt->uncore);
	u32 master_tile_ctl, master_ctl;
	u32 gu_misc_iir;

	if (!intel_irqs_enabled(i915))
		return IRQ_NONE;

	master_tile_ctl = dg1_master_intr_disable(regs);
	if (!master_tile_ctl) {
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	/* FIXME: we only support tile 0 for now. */
	if (master_tile_ctl & DG1_MSTR_TILE(0)) {
		master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
		raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
	} else {
		drm_err(&i915->drm, "Tile not supported: 0x%08x\n",
			master_tile_ctl);
		dg1_master_intr_enable(regs);
		return IRQ_NONE;
	}

	gen11_gt_irq_handler(gt, master_ctl);

	if (master_ctl & GEN11_DISPLAY_IRQ)
		gen11_display_irq_handler(i915);

	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);

	dg1_master_intr_enable(regs);

	gen11_gu_misc_irq_handler(i915, gu_misc_iir);

	pmu_irq_stats(i915, IRQ_HANDLED);

	return IRQ_HANDLED;
}

/* Reset the south display engine (PCH) interrupt registers. */
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (HAS_PCH_NOP(dev_priv))
		return;

	GEN3_IRQ_RESET(uncore, SDE);

	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
}

/* drm_dma.h hooks
*/
/* Mask and clear all north+south display and GT interrupts (ILK..IVB/HSW). */
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	GEN3_IRQ_RESET(uncore, DE);
	dev_priv->irq_mask = ~0u;

	if (GRAPHICS_VER(dev_priv) == 7)
		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);

	if (IS_HASWELL(dev_priv)) {
		/* Also quiesce the PSR interrupt registers on HSW. */
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	gen5_gt_irq_reset(to_gt(dev_priv));

	ibx_irq_reset(dev_priv);
}

static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
	/* Master disable first, with a posting read to flush it. */
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);

	gen5_gt_irq_reset(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	gen8_master_intr_disable(intel_uncore_regs(uncore));

	gen8_gt_irq_reset(to_gt(dev_priv));
	gen8_display_irq_reset(dev_priv);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_reset(dev_priv);
}

static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;

	gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	gen11_gt_irq_reset(gt);
	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

/* Like gen11_irq_reset but iterates all GTs (DG1+ multi-tile aware). */
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_gt *gt;
	unsigned int i;

	dg1_master_intr_disable(intel_uncore_regs(&dev_priv->uncore));

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_reset(gt);

	gen11_display_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
}

static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Master disable first, with a posting read to flush it. */
	intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(to_gt(dev_priv));

	GEN3_IRQ_RESET(uncore, GEN8_PCU_);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_reset(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * Enable the display-engine interrupts we always want on for ILK..IVB/HSW.
 * display_mask goes into IMR/IER; extra_mask bits are enabled in IER only.
 */
static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Master enable last, flushed with a posting read. */
	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}
static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* PCH (south display) first, then GT and north display engine. */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	/* Master enable strictly last. */
	gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore));
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	/* Master enable last, flushed with a posting read. */
	gen11_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

/* Like gen11_irq_postinstall but iterates all GTs and skips display-less parts. */
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, dev_priv, i)
		gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		if (DISPLAY_VER(dev_priv) >= 14)
			mtp_irq_postinstall(dev_priv);
		else
			icp_irq_postinstall(dev_priv);

		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(intel_uncore_regs(uncore));
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Master enable last, flushed with a posting read. */
	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	i9xx_pipestat_irq_reset(dev_priv);

	gen2_irq_reset(uncore);
	dev_priv->irq_mask = ~0u;
}

/*
 * EMR value for gen2/3: which error sources stay masked off.
 * On FBC-capable parts all page table errors must be masked (see below).
 */
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
	/*
	 * On gen2/3 FBC generates (seemingly spurious)
	 * display INVALID_GTT/INVALID_GTT_PTE table errors.
	 *
	 * Also gen3 bspec has this to say:
	 * "DISPA_INVALID_GTT_PTE
	 "  [DevNapa] : Reserved. This bit does not reflect the page
	 "  table error for the display plane A."
	 *
	 * Unfortunately we can't mask off individual PGTBL_ER bits,
	 * so we just have to mask off all page table errors via EMR.
	 */
	if (HAS_FBC(i915))
		return ~I915_ERROR_MEMORY_REFRESH;
	else
		return ~(I915_ERROR_PAGE_TABLE |
			 I915_ERROR_MEMORY_REFRESH);
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	gen2_irq_init(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * Read+clear EIR (16-bit variant); any bits still set afterwards are
 * "stuck" errors that get masked off in EMR so they stop re-triggering.
 */
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);
	intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

/* 32-bit counterpart of i8xx_error_irq_ack; same stuck-bit masking dance. */
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);
	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	drm_dbg(&dev_priv->drm, "Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);

	drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n",
		intel_uncore_read(&dev_priv->uncore, PGTBL_ER));
}

/* Interrupt handler for gen2 (16-bit IIR): ack sources, then dispatch. */
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		/* Write-back PORT_HOTPLUG_STAT to clear the sticky bits. */
		intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_STAT, 0, 0);
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

/* Interrupt handler for gen3: like i8xx but 32-bit IIR and optional hotplug. */
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
/*
 * NOTE(review): tail of i965_irq_reset(); the function's opening lines
 * precede this chunk of the file.
 */
	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	/* Mask everything; individual sources are unmasked in postinstall. */
	dev_priv->irq_mask = ~0u;
}

/*
 * Compute the EMR value for i965/g4x: everything masked except the
 * error sources we want reported.
 */
static u32 i965_error_mask(struct drm_i915_private *i915)
{
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 *
	 * i965 FBC no longer generates spurious GTT errors,
	 * so we can always enable the page table errors.
	 */
	if (IS_G4X(i915))
		return ~(GM45_ERROR_PAGE_TABLE |
			 GM45_ERROR_MEM_PRIV |
			 GM45_ERROR_CP_PRIV |
			 I915_ERROR_MEMORY_REFRESH);
	else
		return ~(I915_ERROR_PAGE_TABLE |
			 I915_ERROR_MEMORY_REFRESH);
}

/* Enable the interrupt sources we want after i965_irq_reset() has run. */
static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	/* g4x adds a second (BSD/video) ring with its own user interrupt. */
	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

/*
 * Top-level interrupt handler for gen4 (i965/g4x) hardware: ack the
 * sources first, clear IIR, then dispatch to the per-source handlers.
 */
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		/* Clear IIR before dispatching so new events re-trigger. */
		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		/*
		 * NOTE(review): the BSD engine's event bits appear to live in
		 * the high bits of IIR, hence the shift by 25
		 * (I915_BSD_USER_INTERRUPT's bit position) — confirm against
		 * the register definition.
		 */
		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	/* Interrupt statistics for PMU (see the DOC comment near the top). */
	pmu_irq_stats(dev_priv, IRQ_HANDLED);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	/* Display-less SKUs skip all of the display irq setup below. */
	if (!HAS_DISPLAY(dev_priv))
		return;

	dev_priv->drm.vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	intel_hotplug_irq_init(dev_priv);
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	/* Free the per-slice L3 remap tables allocated during driver load. */
	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/*
 * Select the platform-appropriate top-level interrupt handler;
 * keep the branch structure in sync with intel_irq_reset() and
 * intel_irq_postinstall() below.
 */
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

/* Dispatch to the platform-specific irq reset, mirroring the table above. */
static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

/* Dispatch to the platform-specific postinstall, mirroring the table above. */
static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	/* Quiesce the hardware before the handler can be invoked. */
	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		/* Roll back the flag set above; irqs never became live. */
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_display_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	/* Wait for any in-flight handler invocation to finish. */
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

/* Report whether driver interrupt handling is currently enabled. */
bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

/* Wait until all in-flight invocations of our irq handler have returned. */
void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

/* Like intel_synchronize_irq(), but only waits for the hard irq part. */
void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}