/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"

static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		intel_engine_breadcrumbs_irq(engine);
		tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(gt, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return cs_irq_handler(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear must be after shared has been served for engine */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}

bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);

	/* Restore irq masks on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
	struct intel_uncore *uncore = gt->uncore;
	const u32 dmask = irqs << 16 | irqs;
	const u32 smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}

static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = gt->uncore->regs;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2]))
			raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
			       gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
			       gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
			       gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
		cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
			       gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
			       gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(gt->i915, gt_iir[2]);
		guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* These are interrupts we'll toggle with the ring mask register */
	u32 gt_interrupts[] = {
		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),

		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),

		0,

		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
	};

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (INTEL_GEN(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN(gt->i915, 5))
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (INTEL_GEN(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt->i915, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}