/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
#include "intel_rps.h"

static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
        if (iir & GUC_INTR_GUC2HOST)
                intel_guc_to_host_event_handler(guc);
}

static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
        bool tasklet = false;

        if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
                u32 eir;

                eir = ENGINE_READ(engine, RING_EIR);
                ENGINE_TRACE(engine, "CS error: %x\n", eir);

                /* Disable the error interrupt until after the reset */
                if (likely(eir)) {
                        ENGINE_WRITE(engine, RING_EMR, ~0u);
                        ENGINE_WRITE(engine, RING_EIR, eir);
                        WRITE_ONCE(engine->execlists.error_interrupt, eir);
                        tasklet = true;
                }
        }

        if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
                WRITE_ONCE(engine->execlists.yield,
                           ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
                ENGINE_TRACE(engine, "semaphore yield: %08x\n",
                             engine->execlists.yield);
                if (del_timer(&engine->execlists.timer))
                        tasklet = true;
        }

        if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
                tasklet = true;

        if (iir & GT_RENDER_USER_INTERRUPT) {
                intel_engine_signal_breadcrumbs(engine);
                tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
        }

        if (tasklet)
                tasklet_hi_schedule(&engine->execlists.tasklet);
}

static u32
gen11_gt_engine_identity(struct intel_gt *gt,
                         const unsigned int bank, const unsigned int bit)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 timeout_ts;
        u32 ident;

        lockdep_assert_held(&gt->irq_lock);

        raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

        /*
         * NB: Specs do not specify how long to spin wait,
         * so we do ~100us as an educated guess.
         */
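        /*
         * local_clock() ticks in nanoseconds, so ">> 10" is a cheap
         * approximation of a ns-to-us conversion (divide by 1024), making
         * timeout_ts roughly 100us from now.
         */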
        timeout_ts = (local_clock() >> 10) + 100;
        do {
                ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
        } while (!(ident & GEN11_INTR_DATA_VALID) &&
                 !time_after32(local_clock() >> 10, timeout_ts));

        if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
                DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
                          bank, bit, ident);
                return 0;
        }

        raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
                      GEN11_INTR_DATA_VALID);

        return ident;
}

static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
                        const u16 iir)
{
        if (instance == OTHER_GUC_INSTANCE)
                return guc_irq_handler(&gt->uc.guc, iir);

        if (instance == OTHER_GTPM_INSTANCE)
                return gen11_rps_irq_handler(&gt->rps, iir);

        WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
                  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
                         const u8 instance, const u16 iir)
{
        struct intel_engine_cs *engine;

        if (instance <= MAX_ENGINE_INSTANCE)
                engine = gt->engine_class[class][instance];
        else
                engine = NULL;

        if (likely(engine))
                return cs_irq_handler(engine, iir);

        WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
                  class, instance);
}

static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
        const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
        const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
        const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

        if (unlikely(!intr))
                return;

        if (class <= COPY_ENGINE_CLASS)
                return gen11_engine_irq_handler(gt, class, instance, intr);

        if (class == OTHER_CLASS)
                return gen11_other_irq_handler(gt, instance, intr);

        WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
                  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
        void __iomem * const regs = gt->uncore->regs;
        unsigned long intr_dw;
        unsigned int bit;

        lockdep_assert_held(&gt->irq_lock);

        intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

        for_each_set_bit(bit, &intr_dw, 32) {
                const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

                gen11_gt_identity_handler(gt, ident);
        }

        /* Clear must come after the shared IIR has been serviced for the engine */
        raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
        unsigned int bank;

        spin_lock(&gt->irq_lock);

        for (bank = 0; bank < 2; bank++) {
                if (master_ctl & GEN11_GT_DW_IRQ(bank))
                        gen11_gt_bank_handler(gt, bank);
        }

        spin_unlock(&gt->irq_lock);
}

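/*
 * Check whether the interrupt for a single bank/bit is still pending in
 * GT INTR DW; if it is, service the Selector/Shared IIR via
 * gen11_gt_engine_identity() and then clear the bit. Returns true if a
 * stale interrupt had to be flushed.
 */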
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
                            const unsigned int bank, const unsigned int bit)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 dw;

        lockdep_assert_held(&gt->irq_lock);

        dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
        if (dw & BIT(bit)) {
                /*
                 * According to the BSpec, DW_IIR bits cannot be cleared without
                 * first servicing the Selector & Shared IIR registers.
                 */
                gen11_gt_engine_identity(gt, bank, bit);

                /*
                 * We locked GT INT DW by reading it. If we want to (try
                 * to) recover from this successfully, we need to clear
                 * our bit, otherwise we are locking the register for
                 * everybody.
                 */
                raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

                return true;
        }

        return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;

        /* Disable RCS, BCS, VCS and VECS class interrupts. */
        intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

        /* Mask all irqs on RCS, BCS, VCS and VECS engines. */
        intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
        const u32 irqs =
                GT_CS_MASTER_ERROR_INTERRUPT |
                GT_RENDER_USER_INTERRUPT |
                GT_CONTEXT_SWITCH_INTERRUPT |
                GT_WAIT_SEMAPHORE_INTERRUPT;
        struct intel_uncore *uncore = gt->uncore;
        const u32 dmask = irqs << 16 | irqs;
        const u32 smask = irqs << 16;

        BUILD_BUG_ON(irqs & 0xffff0000);

        /* Enable RCS, BCS, VCS and VECS class interrupts. */
        intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
        intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

        /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
        intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
        intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
        intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
        intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
        intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

        /*
         * RPS interrupts will get enabled/disabled on demand when RPS itself
         * is enabled/disabled.
         */
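        /*
         * Start with no PM interrupts enabled (pm_ier == 0) and all of them
         * masked (pm_imr == ~0), mirroring the ENABLE/MASK writes below; the
         * RPS and GuC code unmask the bits they need on demand.
         */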
        gt->pm_ier = 0x0;
        gt->pm_imr = ~gt->pm_ier;
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

        /* Same thing for GuC interrupts */
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
        if (gt_iir & ILK_BSD_USER_INTERRUPT)
                intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
        if (!HAS_L3_DPF(gt->i915))
                return;

        spin_lock(&gt->irq_lock);
        gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
        spin_unlock(&gt->irq_lock);

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
                gt->i915->l3_parity.which_slice |= 1 << 1;

        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                gt->i915->l3_parity.which_slice |= 1 << 0;

        schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
        if (gt_iir & GT_RENDER_USER_INTERRUPT)
                intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
        if (gt_iir & GT_BSD_USER_INTERRUPT)
                intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
        if (gt_iir & GT_BLT_USER_INTERRUPT)
                intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);

        if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
                      GT_BSD_CS_ERROR_INTERRUPT |
                      GT_CS_MASTER_ERROR_INTERRUPT))
                DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

        if (gt_iir & GT_PARITY_ERROR(gt->i915))
                gen7_parity_error_irq_handler(gt, gt_iir);
}

void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
        void __iomem * const regs = gt->uncore->regs;
        u32 iir;

        if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(0));
                if (likely(iir)) {
                        cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
                                       iir >> GEN8_RCS_IRQ_SHIFT);
                        cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
                                       iir >> GEN8_BCS_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(0), iir);
                }
        }

        if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(1));
                if (likely(iir)) {
                        cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
                                       iir >> GEN8_VCS0_IRQ_SHIFT);
                        cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
                                       iir >> GEN8_VCS1_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(1), iir);
                }
        }

        if (master_ctl & GEN8_GT_VECS_IRQ) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(3));
                if (likely(iir)) {
                        cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
                                       iir >> GEN8_VECS_IRQ_SHIFT);
                        raw_reg_write(regs, GEN8_GT_IIR(3), iir);
                }
        }

        if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
                iir = raw_reg_read(regs, GEN8_GT_IIR(2));
                if (likely(iir)) {
                        gen6_rps_irq_handler(&gt->rps, iir);
                        guc_irq_handler(&gt->uc.guc, iir >> 16);
                        raw_reg_write(regs, GEN8_GT_IIR(2), iir);
                }
        }
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;

        GEN8_IRQ_RESET_NDX(uncore, GT, 0);
        GEN8_IRQ_RESET_NDX(uncore, GT, 1);
        GEN8_IRQ_RESET_NDX(uncore, GT, 2);
        GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
        /* These are interrupts we'll toggle with the ring mask register */
        const u32 irqs =
                GT_CS_MASTER_ERROR_INTERRUPT |
                GT_RENDER_USER_INTERRUPT |
                GT_CONTEXT_SWITCH_INTERRUPT |
                GT_WAIT_SEMAPHORE_INTERRUPT;
        const u32 gt_interrupts[] = {
                irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
                irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
                0,
                irqs << GEN8_VECS_IRQ_SHIFT,
        };
        struct intel_uncore *uncore = gt->uncore;

        gt->pm_ier = 0x0;
        gt->pm_imr = ~gt->pm_ier;
        GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
        /*
         * RPS interrupts will get enabled/disabled on demand when RPS itself
         * is enabled/disabled. Same will be the case for GuC interrupts.
         */
        GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
        GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen5_gt_update_irq(struct intel_gt *gt,
                               u32 interrupt_mask,
                               u32 enabled_irq_mask)
{
        lockdep_assert_held(&gt->irq_lock);

        GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

        gt->gt_imr &= ~interrupt_mask;
        gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
        intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
        gen5_gt_update_irq(gt, mask, mask);
        intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
        gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;

        GEN3_IRQ_RESET(uncore, GT);
        if (INTEL_GEN(gt->i915) >= 6)
                GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        u32 pm_irqs = 0;
        u32 gt_irqs = 0;

        gt->gt_imr = ~0;
        if (HAS_L3_DPF(gt->i915)) {
                /* L3 parity interrupt is always unmasked. */
                gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
                gt_irqs |= GT_PARITY_ERROR(gt->i915);
        }

        gt_irqs |= GT_RENDER_USER_INTERRUPT;
        if (IS_GEN(gt->i915, 5))
                gt_irqs |= ILK_BSD_USER_INTERRUPT;
        else
                gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

        GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

        if (INTEL_GEN(gt->i915) >= 6) {
                /*
                 * RPS interrupts will get enabled/disabled on demand when RPS
                 * itself is enabled/disabled.
                 */
                if (HAS_ENGINE(gt->i915, VECS0)) {
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
                        gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
                }

                gt->pm_imr = 0xffffffff;
                GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
        }
}