/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
#include "intel_rps.h"

static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
		u32 eir;

		/* Upper 16b are the enabling mask, rsvd for internal errors */
		eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
		ENGINE_TRACE(engine, "CS error: %x\n", eir);

		/* Disable the error interrupt until after the reset */
		if (likely(eir)) {
			ENGINE_WRITE(engine, RING_EMR, ~0u);
			ENGINE_WRITE(engine, RING_EIR, eir);
			WRITE_ONCE(engine->execlists.error_interrupt, eir);
			tasklet = true;
		}
	}

	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
		WRITE_ONCE(engine->execlists.yield,
			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
			     engine->execlists.yield);
		if (del_timer(&engine->execlists.timer))
			tasklet = true;
	}

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_signal_breadcrumbs(engine);
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}
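/*
 * Editor's note on the gen11 interrupt discovery flow implemented by the
 * helpers below (summarised from the code itself, not from additional
 * documentation): GT interrupts are reported through two banks of
 * GT_INTR_DW registers; for each pending bit the handler writes the IIR
 * selector register, spins until the shared identity register reports
 * valid data, and then dispatches on the class/instance/interrupt triplet
 * encoded in that identity dword.
 */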
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}
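/*
 * Caller-side sketch (illustrative only; the helper names belong to the
 * top-level interrupt handler in i915_irq.c and are an assumption of this
 * note rather than something defined in this file): master interrupts are
 * expected to be disabled and sampled before the GT banks are delegated
 * here, roughly as follows:
 *
 *	master_ctl = gen11_master_intr_disable(regs);
 *	gen11_gt_irq_handler(gt, master_ctl);
 *	gen11_master_intr_enable(regs);
 */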
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
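/*
 * Note on the mask layout used below (inferred from the register names and
 * the BUILD_BUG_ON, stated as an aid rather than from extra documentation):
 * each gen11 engine-mask register carries two engines, one per 16-bit half.
 * dmask therefore unmasks the selected irqs in both halves, while smask
 * unmasks only the upper half for registers whose lower half is reserved
 * (GEN11_RCS0_RSVD_INTR_MASK, GEN11_BCS_RSVD_INTR_MASK).
 */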
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
				       iir >> GEN8_RCS_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
				       iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
				       iir >> GEN8_VCS0_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
				       iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
				       iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}
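/*
 * For reference (derived from the handler above rather than separate
 * documentation): gen8 packs two engines per GT IIR/IMR/IER register, each
 * engine's bits sitting in the lower or upper 16-bit half as selected by
 * the GEN8_*_IRQ_SHIFT constants, which is why the reset and postinstall
 * routines below operate on the four NDX-indexed register triplets.
 */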
void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(gt->i915, 5))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (INTEL_GEN(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}
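/*
 * Usage sketch for the gen5/gen6 GTIMR helpers above (illustrative only;
 * the spin_lock_irq() flavour is an assumption about the caller's context):
 *
 *	spin_lock_irq(&gt->irq_lock);
 *	gen5_gt_enable_irq(gt, GT_RENDER_USER_INTERRUPT);
 *	spin_unlock_irq(&gt->irq_lock);
 *
 * gen5_gt_update_irq() asserts that gt->irq_lock is held, so callers must
 * serialise both the enable and disable paths with that lock.
 */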