1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2019 Intel Corporation 5 */ 6 7 #include <linux/sched/clock.h> 8 9 #include "i915_drv.h" 10 #include "i915_irq.h" 11 #include "intel_gt.h" 12 #include "intel_gt_irq.h" 13 #include "intel_uncore.h" 14 #include "intel_rps.h" 15 16 static void guc_irq_handler(struct intel_guc *guc, u16 iir) 17 { 18 if (iir & GUC_INTR_GUC2HOST) 19 intel_guc_to_host_event_handler(guc); 20 } 21 22 static void 23 cs_irq_handler(struct intel_engine_cs *engine, u32 iir) 24 { 25 bool tasklet = false; 26 27 if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) { 28 u32 eir; 29 30 eir = ENGINE_READ(engine, RING_EIR); 31 ENGINE_TRACE(engine, "CS error: %x\n", eir); 32 33 /* Disable the error interrupt until after the reset */ 34 if (likely(eir)) { 35 ENGINE_WRITE(engine, RING_EMR, ~0u); 36 ENGINE_WRITE(engine, RING_EIR, eir); 37 WRITE_ONCE(engine->execlists.error_interrupt, eir); 38 tasklet = true; 39 } 40 } 41 42 if (iir & GT_CONTEXT_SWITCH_INTERRUPT) 43 tasklet = true; 44 45 if (iir & GT_RENDER_USER_INTERRUPT) { 46 intel_engine_signal_breadcrumbs(engine); 47 tasklet |= intel_engine_needs_breadcrumb_tasklet(engine); 48 } 49 50 if (tasklet) 51 tasklet_hi_schedule(&engine->execlists.tasklet); 52 } 53 54 static u32 55 gen11_gt_engine_identity(struct intel_gt *gt, 56 const unsigned int bank, const unsigned int bit) 57 { 58 void __iomem * const regs = gt->uncore->regs; 59 u32 timeout_ts; 60 u32 ident; 61 62 lockdep_assert_held(>->irq_lock); 63 64 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 65 66 /* 67 * NB: Specs do not specify how long to spin wait, 68 * so we do ~100us as an educated guess. 69 */ 70 timeout_ts = (local_clock() >> 10) + 100; 71 do { 72 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 73 } while (!(ident & GEN11_INTR_DATA_VALID) && 74 !time_after32(local_clock() >> 10, timeout_ts)); 75 76 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 77 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 78 bank, bit, ident); 79 return 0; 80 } 81 82 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 83 GEN11_INTR_DATA_VALID); 84 85 return ident; 86 } 87 88 static void 89 gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, 90 const u16 iir) 91 { 92 if (instance == OTHER_GUC_INSTANCE) 93 return guc_irq_handler(>->uc.guc, iir); 94 95 if (instance == OTHER_GTPM_INSTANCE) 96 return gen11_rps_irq_handler(>->rps, iir); 97 98 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 99 instance, iir); 100 } 101 102 static void 103 gen11_engine_irq_handler(struct intel_gt *gt, const u8 class, 104 const u8 instance, const u16 iir) 105 { 106 struct intel_engine_cs *engine; 107 108 if (instance <= MAX_ENGINE_INSTANCE) 109 engine = gt->engine_class[class][instance]; 110 else 111 engine = NULL; 112 113 if (likely(engine)) 114 return cs_irq_handler(engine, iir); 115 116 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 117 class, instance); 118 } 119 120 static void 121 gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity) 122 { 123 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 124 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 125 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 126 127 if (unlikely(!intr)) 128 return; 129 130 if (class <= COPY_ENGINE_CLASS) 131 return gen11_engine_irq_handler(gt, class, instance, intr); 132 133 if (class == OTHER_CLASS) 134 return gen11_other_irq_handler(gt, instance, intr); 135 136 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 137 class, instance, intr); 138 } 139 140 static void 141 gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank) 142 { 143 void __iomem * const regs = gt->uncore->regs; 144 unsigned long intr_dw; 145 unsigned int bit; 146 147 lockdep_assert_held(>->irq_lock); 148 149 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 150 151 for_each_set_bit(bit, &intr_dw, 32) { 152 const u32 ident = gen11_gt_engine_identity(gt, bank, bit); 153 154 gen11_gt_identity_handler(gt, ident); 155 } 156 157 /* Clear must be after shared has been served for engine */ 158 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 159 } 160 161 void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl) 162 { 163 unsigned int bank; 164 165 spin_lock(>->irq_lock); 166 167 for (bank = 0; bank < 2; bank++) { 168 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 169 gen11_gt_bank_handler(gt, bank); 170 } 171 172 spin_unlock(>->irq_lock); 173 } 174 175 bool gen11_gt_reset_one_iir(struct intel_gt *gt, 176 const unsigned int bank, const unsigned int bit) 177 { 178 void __iomem * const regs = gt->uncore->regs; 179 u32 dw; 180 181 lockdep_assert_held(>->irq_lock); 182 183 dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 184 if (dw & BIT(bit)) { 185 /* 186 * According to the BSpec, DW_IIR bits cannot be cleared without 187 * first servicing the Selector & Shared IIR registers. 188 */ 189 gen11_gt_engine_identity(gt, bank, bit); 190 191 /* 192 * We locked GT INT DW by reading it. If we want to (try 193 * to) recover from this successfully, we need to clear 194 * our bit, otherwise we are locking the register for 195 * everybody. 196 */ 197 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit)); 198 199 return true; 200 } 201 202 return false; 203 } 204 205 void gen11_gt_irq_reset(struct intel_gt *gt) 206 { 207 struct intel_uncore *uncore = gt->uncore; 208 209 /* Disable RCS, BCS, VCS and VECS class engines. */ 210 intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0); 211 intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0); 212 213 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 214 intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0); 215 intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0); 216 intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0); 217 intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0); 218 intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0); 219 220 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 221 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 222 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); 223 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); 224 } 225 226 void gen11_gt_irq_postinstall(struct intel_gt *gt) 227 { 228 const u32 irqs = 229 GT_CS_MASTER_ERROR_INTERRUPT | 230 GT_RENDER_USER_INTERRUPT | 231 GT_CONTEXT_SWITCH_INTERRUPT; 232 struct intel_uncore *uncore = gt->uncore; 233 const u32 dmask = irqs << 16 | irqs; 234 const u32 smask = irqs << 16; 235 236 BUILD_BUG_ON(irqs & 0xffff0000); 237 238 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 239 intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask); 240 intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask); 241 242 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 243 intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask); 244 intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask); 245 intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask); 246 intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask); 247 intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask); 248 249 /* 250 * RPS interrupts will get enabled/disabled on demand when RPS itself 251 * is enabled/disabled. 252 */ 253 gt->pm_ier = 0x0; 254 gt->pm_imr = ~gt->pm_ier; 255 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 256 intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 257 258 /* Same thing for GuC interrupts */ 259 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0); 260 intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0); 261 } 262 263 void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) 264 { 265 if (gt_iir & GT_RENDER_USER_INTERRUPT) 266 intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]); 267 if (gt_iir & ILK_BSD_USER_INTERRUPT) 268 intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]); 269 } 270 271 static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir) 272 { 273 if (!HAS_L3_DPF(gt->i915)) 274 return; 275 276 spin_lock(>->irq_lock); 277 gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915)); 278 spin_unlock(>->irq_lock); 279 280 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 281 gt->i915->l3_parity.which_slice |= 1 << 1; 282 283 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 284 gt->i915->l3_parity.which_slice |= 1 << 0; 285 286 schedule_work(>->i915->l3_parity.error_work); 287 } 288 289 void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir) 290 { 291 if (gt_iir & GT_RENDER_USER_INTERRUPT) 292 intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]); 293 if (gt_iir & GT_BSD_USER_INTERRUPT) 294 intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]); 295 if (gt_iir & GT_BLT_USER_INTERRUPT) 296 intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]); 297 298 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 299 GT_BSD_CS_ERROR_INTERRUPT | 300 GT_CS_MASTER_ERROR_INTERRUPT)) 301 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 302 303 if (gt_iir & GT_PARITY_ERROR(gt->i915)) 304 gen7_parity_error_irq_handler(gt, gt_iir); 305 } 306 307 void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl) 308 { 309 void __iomem * const regs = gt->uncore->regs; 310 u32 iir; 311 312 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 313 iir = raw_reg_read(regs, GEN8_GT_IIR(0)); 314 if (likely(iir)) { 315 cs_irq_handler(gt->engine_class[RENDER_CLASS][0], 316 iir >> GEN8_RCS_IRQ_SHIFT); 317 cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0], 318 iir >> GEN8_BCS_IRQ_SHIFT); 319 raw_reg_write(regs, GEN8_GT_IIR(0), iir); 320 } 321 } 322 323 if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { 324 iir = raw_reg_read(regs, GEN8_GT_IIR(1)); 325 if (likely(iir)) { 326 cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0], 327 iir >> GEN8_VCS0_IRQ_SHIFT); 328 cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1], 329 iir >> GEN8_VCS1_IRQ_SHIFT); 330 raw_reg_write(regs, GEN8_GT_IIR(1), iir); 331 } 332 } 333 334 if (master_ctl & GEN8_GT_VECS_IRQ) { 335 iir = raw_reg_read(regs, GEN8_GT_IIR(3)); 336 if (likely(iir)) { 337 cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0], 338 iir >> GEN8_VECS_IRQ_SHIFT); 339 raw_reg_write(regs, GEN8_GT_IIR(3), iir); 340 } 341 } 342 343 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 344 iir = raw_reg_read(regs, GEN8_GT_IIR(2)); 345 if (likely(iir)) { 346 gen6_rps_irq_handler(>->rps, iir); 347 guc_irq_handler(>->uc.guc, iir >> 16); 348 raw_reg_write(regs, GEN8_GT_IIR(2), iir); 349 } 350 } 351 } 352 353 void gen8_gt_irq_reset(struct intel_gt *gt) 354 { 355 struct intel_uncore *uncore = gt->uncore; 356 357 GEN8_IRQ_RESET_NDX(uncore, GT, 0); 358 GEN8_IRQ_RESET_NDX(uncore, GT, 1); 359 GEN8_IRQ_RESET_NDX(uncore, GT, 2); 360 GEN8_IRQ_RESET_NDX(uncore, GT, 3); 361 } 362 363 void gen8_gt_irq_postinstall(struct intel_gt *gt) 364 { 365 /* These are interrupts we'll toggle with the ring mask register */ 366 const u32 irqs = 367 GT_CS_MASTER_ERROR_INTERRUPT | 368 GT_RENDER_USER_INTERRUPT | 369 GT_CONTEXT_SWITCH_INTERRUPT; 370 const u32 gt_interrupts[] = { 371 irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT, 372 irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT, 373 0, 374 irqs << GEN8_VECS_IRQ_SHIFT, 375 }; 376 struct intel_uncore *uncore = gt->uncore; 377 378 gt->pm_ier = 0x0; 379 gt->pm_imr = ~gt->pm_ier; 380 GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 381 GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 382 /* 383 * RPS interrupts will get enabled/disabled on demand when RPS itself 384 * is enabled/disabled. Same wil be the case for GuC interrupts. 385 */ 386 GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier); 387 GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 388 } 389 390 static void gen5_gt_update_irq(struct intel_gt *gt, 391 u32 interrupt_mask, 392 u32 enabled_irq_mask) 393 { 394 lockdep_assert_held(>->irq_lock); 395 396 GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask); 397 398 gt->gt_imr &= ~interrupt_mask; 399 gt->gt_imr |= (~enabled_irq_mask & interrupt_mask); 400 intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr); 401 } 402 403 void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask) 404 { 405 gen5_gt_update_irq(gt, mask, mask); 406 intel_uncore_posting_read_fw(gt->uncore, GTIMR); 407 } 408 409 void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask) 410 { 411 gen5_gt_update_irq(gt, mask, 0); 412 } 413 414 void gen5_gt_irq_reset(struct intel_gt *gt) 415 { 416 struct intel_uncore *uncore = gt->uncore; 417 418 GEN3_IRQ_RESET(uncore, GT); 419 if (INTEL_GEN(gt->i915) >= 6) 420 GEN3_IRQ_RESET(uncore, GEN6_PM); 421 } 422 423 void gen5_gt_irq_postinstall(struct intel_gt *gt) 424 { 425 struct intel_uncore *uncore = gt->uncore; 426 u32 pm_irqs = 0; 427 u32 gt_irqs = 0; 428 429 gt->gt_imr = ~0; 430 if (HAS_L3_DPF(gt->i915)) { 431 /* L3 parity interrupt is always unmasked. */ 432 gt->gt_imr = ~GT_PARITY_ERROR(gt->i915); 433 gt_irqs |= GT_PARITY_ERROR(gt->i915); 434 } 435 436 gt_irqs |= GT_RENDER_USER_INTERRUPT; 437 if (IS_GEN(gt->i915, 5)) 438 gt_irqs |= ILK_BSD_USER_INTERRUPT; 439 else 440 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 441 442 GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs); 443 444 if (INTEL_GEN(gt->i915) >= 6) { 445 /* 446 * RPS interrupts will get enabled/disabled on demand when RPS 447 * itself is enabled/disabled. 448 */ 449 if (HAS_ENGINE(gt->i915, VECS0)) { 450 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 451 gt->pm_ier |= PM_VEBOX_USER_INTERRUPT; 452 } 453 454 gt->pm_imr = 0xffffffff; 455 GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs); 456 } 457 } 458