/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"

static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
		u32 eir;

		/* Upper 16b are the enabling mask, rsvd for internal errors */
		eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
		ENGINE_TRACE(engine, "CS error: %x\n", eir);

		/* Disable the error interrupt until after the reset */
		if (likely(eir)) {
			ENGINE_WRITE(engine, RING_EMR, ~0u);
			ENGINE_WRITE(engine, RING_EIR, eir);
			WRITE_ONCE(engine->execlists.error_interrupt, eir);
			tasklet = true;
		}
	}

	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
		WRITE_ONCE(engine->execlists.yield,
			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
		ENGINE_TRACE(engine, "semaphore yield: %08x\n",
			     engine->execlists.yield);
		if (del_timer(&engine->execlists.timer))
			tasklet = true;
	}

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_signal_breadcrumbs(engine);
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

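/*
 * Gen11 reports GT interrupts through banked GT_INTR_DW registers.  For a
 * pending bit, gen11_gt_engine_identity() below selects that bit via the
 * bank's IIR selector register, spin-waits (~100us at most) for the
 * identity register to flag valid data, and then acks it.  The returned
 * identity word encodes the engine class, engine instance and a 16-bit
 * interrupt vector that gen11_gt_identity_handler() dispatches further.
 */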
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}

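/*
 * gen11_gt_reset_one_iir() clears a single pending bit in a GT_INTR_DW
 * bank.  Reading GT_INTR_DW locks it, so the selector/identity handshake
 * has to be completed for the bit before it can be cleared (see the BSpec
 * note inside).  Returns true if the bit was pending and has been cleared.
 */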
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Mask all irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

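/*
 * Note on the dmask/smask values used below: irqs fits in 16 bits (see the
 * BUILD_BUG_ON), and each GEN11 INTR_ENABLE/INTR_MASK register packs two
 * 16-bit halves.  dmask programs both halves of a register, smask only the
 * upper one, which is presumably why the single-engine RCS0/reserved and
 * BCS/reserved mask registers are unmasked with ~smask while the paired
 * VCS/VECS mask registers use ~dmask.
 */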
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[VIDEO_DECODE_CLASS][0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_signal_breadcrumbs(gt->engine_class[COPY_ENGINE_CLASS][0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
				       iir >> GEN8_RCS_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
				       iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
				       iir >> GEN8_VCS0_IRQ_SHIFT);
			cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
				       iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
				       iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}

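/*
 * Gen8 exposes four GT interrupt banks (GT IIR/IMR/IER 0-3).  The reset and
 * postinstall helpers below walk all four: banks 0, 1 and 3 carry the
 * engine interrupts at the per-engine shifts used in gen8_gt_irq_handler()
 * above, while bank 2 carries the PM/GuC interrupts, which are left
 * disabled here and only enabled on demand.
 */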
void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

/*
 * Update GTIMR for the bits in @interrupt_mask: bits also set in
 * @enabled_irq_mask become unmasked, the remaining ones become masked.
 * Other bits of GTIMR are left untouched.
 */
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(gt->i915, 5))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (INTEL_GEN(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}