/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
#include "intel_rps.h"

static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
		u32 eir;

		/* Upper 16b are the enabling mask, rsvd for internal errors */
		eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
		ENGINE_TRACE(engine, "CS error: %x\n", eir);

		/* Disable the error interrupt until after the reset */
		if (likely(eir)) {
			ENGINE_WRITE(engine, RING_EMR, ~0u);
			ENGINE_WRITE(engine, RING_EIR, eir);
			WRITE_ONCE(engine->execlists.error_interrupt, eir);
			tasklet = true;
		}
	}

	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
		WRITE_ONCE(engine->execlists.yield,
			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
			     engine->execlists.yield);
		if (del_timer(&engine->execlists.timer))
			tasklet = true;
	}

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_signal_breadcrumbs(engine);
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}
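
/*
 * Read back the identity of a single pending bit in GT_INTR_DW: select the
 * bit, spin briefly until the identity register reports valid data, then
 * ack it. Returns 0 if the hardware never flags the identity as valid.
 */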
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* The clear must come after the shared IIRs have been serviced */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}
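
/*
 * Check a single GT_INTR_DW bit and, if it is pending, service the
 * selector/shared IIRs and clear the bit so it can fire again.
 * Returns true if the bit was found set.
 */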
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Restore masked irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
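
/*
 * Unmask and enable the per-engine interrupts we rely on (CS errors, user
 * interrupts, context switches, semaphore yields) for each engine class.
 * RPS and GuC interrupts remain masked here and are enabled on demand.
 */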
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
				       iir >> GEN8_RCS_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
				       iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
				       iir >> GEN8_VCS0_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
				       iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
				       iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}
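
/* Disable and clear all four gen8 GT IIR banks. */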
void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(gt->i915, 5))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (INTEL_GEN(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}