1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2016 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include <drm/drm_print.h> 9 10 #include "gem/i915_gem_context.h" 11 #include "gem/i915_gem_internal.h" 12 #include "gt/intel_gt_print.h" 13 #include "gt/intel_gt_regs.h" 14 15 #include "i915_cmd_parser.h" 16 #include "i915_drv.h" 17 #include "i915_irq.h" 18 #include "i915_reg.h" 19 #include "intel_breadcrumbs.h" 20 #include "intel_context.h" 21 #include "intel_engine.h" 22 #include "intel_engine_pm.h" 23 #include "intel_engine_regs.h" 24 #include "intel_engine_user.h" 25 #include "intel_execlists_submission.h" 26 #include "intel_gt.h" 27 #include "intel_gt_mcr.h" 28 #include "intel_gt_pm.h" 29 #include "intel_gt_requests.h" 30 #include "intel_lrc.h" 31 #include "intel_lrc_reg.h" 32 #include "intel_reset.h" 33 #include "intel_ring.h" 34 #include "uc/intel_guc_submission.h" 35 36 /* Haswell does have the CXT_SIZE register however it does not appear to be 37 * valid. Now, docs explain in dwords what is in the context object. The full 38 * size is 70720 bytes, however, the power context and execlist context will 39 * never be saved (power context is stored elsewhere, and execlists don't work 40 * on HSW) - so the final size, including the extra state required for the 41 * Resource Streamer, is 66944 bytes, which rounds to 17 pages. 42 */ 43 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 44 45 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 46 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 47 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 48 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE) 49 50 #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE) 51 52 #define MAX_MMIO_BASES 3 53 struct engine_info { 54 u8 class; 55 u8 instance; 56 /* mmio bases table *must* be sorted in reverse graphics_ver order */ 57 struct engine_mmio_base { 58 u32 graphics_ver : 8; 59 u32 base : 24; 60 } mmio_bases[MAX_MMIO_BASES]; 61 }; 62 63 static const struct engine_info intel_engines[] = { 64 [RCS0] = { 65 .class = RENDER_CLASS, 66 .instance = 0, 67 .mmio_bases = { 68 { .graphics_ver = 1, .base = RENDER_RING_BASE } 69 }, 70 }, 71 [BCS0] = { 72 .class = COPY_ENGINE_CLASS, 73 .instance = 0, 74 .mmio_bases = { 75 { .graphics_ver = 6, .base = BLT_RING_BASE } 76 }, 77 }, 78 [BCS1] = { 79 .class = COPY_ENGINE_CLASS, 80 .instance = 1, 81 .mmio_bases = { 82 { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE } 83 }, 84 }, 85 [BCS2] = { 86 .class = COPY_ENGINE_CLASS, 87 .instance = 2, 88 .mmio_bases = { 89 { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE } 90 }, 91 }, 92 [BCS3] = { 93 .class = COPY_ENGINE_CLASS, 94 .instance = 3, 95 .mmio_bases = { 96 { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE } 97 }, 98 }, 99 [BCS4] = { 100 .class = COPY_ENGINE_CLASS, 101 .instance = 4, 102 .mmio_bases = { 103 { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE } 104 }, 105 }, 106 [BCS5] = { 107 .class = COPY_ENGINE_CLASS, 108 .instance = 5, 109 .mmio_bases = { 110 { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE } 111 }, 112 }, 113 [BCS6] = { 114 .class = COPY_ENGINE_CLASS, 115 .instance = 6, 116 .mmio_bases = { 117 { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE } 118 }, 119 }, 120 [BCS7] = { 121 .class = COPY_ENGINE_CLASS, 122 .instance = 7, 123 .mmio_bases = { 124 { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE } 125 }, 126 }, 127 [BCS8] = { 128 .class = COPY_ENGINE_CLASS, 129 .instance = 8, 130 .mmio_bases = { 131 { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE } 132 }, 133 
}, 134 [VCS0] = { 135 .class = VIDEO_DECODE_CLASS, 136 .instance = 0, 137 .mmio_bases = { 138 { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE }, 139 { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE }, 140 { .graphics_ver = 4, .base = BSD_RING_BASE } 141 }, 142 }, 143 [VCS1] = { 144 .class = VIDEO_DECODE_CLASS, 145 .instance = 1, 146 .mmio_bases = { 147 { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE }, 148 { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE } 149 }, 150 }, 151 [VCS2] = { 152 .class = VIDEO_DECODE_CLASS, 153 .instance = 2, 154 .mmio_bases = { 155 { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE } 156 }, 157 }, 158 [VCS3] = { 159 .class = VIDEO_DECODE_CLASS, 160 .instance = 3, 161 .mmio_bases = { 162 { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE } 163 }, 164 }, 165 [VCS4] = { 166 .class = VIDEO_DECODE_CLASS, 167 .instance = 4, 168 .mmio_bases = { 169 { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE } 170 }, 171 }, 172 [VCS5] = { 173 .class = VIDEO_DECODE_CLASS, 174 .instance = 5, 175 .mmio_bases = { 176 { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE } 177 }, 178 }, 179 [VCS6] = { 180 .class = VIDEO_DECODE_CLASS, 181 .instance = 6, 182 .mmio_bases = { 183 { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE } 184 }, 185 }, 186 [VCS7] = { 187 .class = VIDEO_DECODE_CLASS, 188 .instance = 7, 189 .mmio_bases = { 190 { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE } 191 }, 192 }, 193 [VECS0] = { 194 .class = VIDEO_ENHANCEMENT_CLASS, 195 .instance = 0, 196 .mmio_bases = { 197 { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE }, 198 { .graphics_ver = 7, .base = VEBOX_RING_BASE } 199 }, 200 }, 201 [VECS1] = { 202 .class = VIDEO_ENHANCEMENT_CLASS, 203 .instance = 1, 204 .mmio_bases = { 205 { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE } 206 }, 207 }, 208 [VECS2] = { 209 .class = VIDEO_ENHANCEMENT_CLASS, 210 .instance = 2, 211 .mmio_bases = { 212 { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE } 213 }, 214 }, 215 [VECS3] = { 216 .class = VIDEO_ENHANCEMENT_CLASS, 217 .instance = 3, 218 .mmio_bases = { 219 { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE } 220 }, 221 }, 222 [CCS0] = { 223 .class = COMPUTE_CLASS, 224 .instance = 0, 225 .mmio_bases = { 226 { .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE } 227 } 228 }, 229 [CCS1] = { 230 .class = COMPUTE_CLASS, 231 .instance = 1, 232 .mmio_bases = { 233 { .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE } 234 } 235 }, 236 [CCS2] = { 237 .class = COMPUTE_CLASS, 238 .instance = 2, 239 .mmio_bases = { 240 { .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE } 241 } 242 }, 243 [CCS3] = { 244 .class = COMPUTE_CLASS, 245 .instance = 3, 246 .mmio_bases = { 247 { .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE } 248 } 249 }, 250 [GSC0] = { 251 .class = OTHER_CLASS, 252 .instance = OTHER_GSC_INSTANCE, 253 .mmio_bases = { 254 { .graphics_ver = 12, .base = MTL_GSC_RING_BASE } 255 } 256 }, 257 }; 258 259 /** 260 * intel_engine_context_size() - return the size of the context for an engine 261 * @gt: the gt 262 * @class: engine class 263 * 264 * Each engine class may require a different amount of space for a context 265 * image. 266 * 267 * Return: size (in bytes) of an engine class specific context image 268 * 269 * Note: this size includes the HWSP, which is part of the context image 270 * in LRC mode, but does not include the "shared data page" used with 271 * GuC submission. The caller should account for this if using the GuC. 
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case COMPUTE_CLASS:
		fallthrough;
	case RENDER_CLASS:
		switch (GRAPHICS_VER(gt->i915)) {
		default:
			MISSING_CASE(GRAPHICS_VER(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
				"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
				GRAPHICS_VER(gt->i915), cxt_size * 64,
				cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
	case OTHER_CLASS:
		if (GRAPHICS_VER(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
376 */ 377 if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS) 378 return; 379 380 if (GRAPHICS_VER(engine->i915) >= 3) 381 ENGINE_WRITE(engine, RING_HWSTAM, mask); 382 else 383 ENGINE_WRITE16(engine, RING_HWSTAM, mask); 384 } 385 386 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) 387 { 388 /* Mask off all writes into the unknown HWSP */ 389 intel_engine_set_hwsp_writemask(engine, ~0u); 390 } 391 392 static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir) 393 { 394 GEM_DEBUG_WARN_ON(iir); 395 } 396 397 static u32 get_reset_domain(u8 ver, enum intel_engine_id id) 398 { 399 u32 reset_domain; 400 401 if (ver >= 11) { 402 static const u32 engine_reset_domains[] = { 403 [RCS0] = GEN11_GRDOM_RENDER, 404 [BCS0] = GEN11_GRDOM_BLT, 405 [BCS1] = XEHPC_GRDOM_BLT1, 406 [BCS2] = XEHPC_GRDOM_BLT2, 407 [BCS3] = XEHPC_GRDOM_BLT3, 408 [BCS4] = XEHPC_GRDOM_BLT4, 409 [BCS5] = XEHPC_GRDOM_BLT5, 410 [BCS6] = XEHPC_GRDOM_BLT6, 411 [BCS7] = XEHPC_GRDOM_BLT7, 412 [BCS8] = XEHPC_GRDOM_BLT8, 413 [VCS0] = GEN11_GRDOM_MEDIA, 414 [VCS1] = GEN11_GRDOM_MEDIA2, 415 [VCS2] = GEN11_GRDOM_MEDIA3, 416 [VCS3] = GEN11_GRDOM_MEDIA4, 417 [VCS4] = GEN11_GRDOM_MEDIA5, 418 [VCS5] = GEN11_GRDOM_MEDIA6, 419 [VCS6] = GEN11_GRDOM_MEDIA7, 420 [VCS7] = GEN11_GRDOM_MEDIA8, 421 [VECS0] = GEN11_GRDOM_VECS, 422 [VECS1] = GEN11_GRDOM_VECS2, 423 [VECS2] = GEN11_GRDOM_VECS3, 424 [VECS3] = GEN11_GRDOM_VECS4, 425 [CCS0] = GEN11_GRDOM_RENDER, 426 [CCS1] = GEN11_GRDOM_RENDER, 427 [CCS2] = GEN11_GRDOM_RENDER, 428 [CCS3] = GEN11_GRDOM_RENDER, 429 [GSC0] = GEN12_GRDOM_GSC, 430 }; 431 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || 432 !engine_reset_domains[id]); 433 reset_domain = engine_reset_domains[id]; 434 } else { 435 static const u32 engine_reset_domains[] = { 436 [RCS0] = GEN6_GRDOM_RENDER, 437 [BCS0] = GEN6_GRDOM_BLT, 438 [VCS0] = GEN6_GRDOM_MEDIA, 439 [VCS1] = GEN8_GRDOM_MEDIA2, 440 [VECS0] = GEN6_GRDOM_VECS, 441 }; 442 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || 443 !engine_reset_domains[id]); 444 reset_domain = engine_reset_domains[id]; 445 } 446 447 return reset_domain; 448 } 449 450 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, 451 u8 logical_instance) 452 { 453 const struct engine_info *info = &intel_engines[id]; 454 struct drm_i915_private *i915 = gt->i915; 455 struct intel_engine_cs *engine; 456 u8 guc_class; 457 458 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); 459 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); 460 BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1)); 461 BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1)); 462 463 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine))) 464 return -EINVAL; 465 466 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS)) 467 return -EINVAL; 468 469 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) 470 return -EINVAL; 471 472 if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance])) 473 return -EINVAL; 474 475 engine = kzalloc(sizeof(*engine), GFP_KERNEL); 476 if (!engine) 477 return -ENOMEM; 478 479 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); 480 481 INIT_LIST_HEAD(&engine->pinned_contexts_list); 482 engine->id = id; 483 engine->legacy_idx = INVALID_ENGINE; 484 engine->mask = BIT(id); 485 engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915), 486 id); 487 engine->i915 = i915; 488 engine->gt = gt; 489 engine->uncore = gt->uncore; 490 guc_class = engine_class_to_guc_class(info->class); 491 
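	/* The GuC identifies engines by (class, instance) rather than by id */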
engine->guc_id = MAKE_GUC_ID(guc_class, info->instance); 492 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases); 493 494 engine->irq_handler = nop_irq_handler; 495 496 engine->class = info->class; 497 engine->instance = info->instance; 498 engine->logical_mask = BIT(logical_instance); 499 __sprint_engine_name(engine); 500 501 if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) && 502 __ffs(CCS_MASK(engine->gt)) == engine->instance) || 503 engine->class == RENDER_CLASS) 504 engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE; 505 506 /* features common between engines sharing EUs */ 507 if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) { 508 engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE; 509 engine->flags |= I915_ENGINE_HAS_EU_PRIORITY; 510 } 511 512 engine->props.heartbeat_interval_ms = 513 CONFIG_DRM_I915_HEARTBEAT_INTERVAL; 514 engine->props.max_busywait_duration_ns = 515 CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT; 516 engine->props.preempt_timeout_ms = 517 CONFIG_DRM_I915_PREEMPT_TIMEOUT; 518 engine->props.stop_timeout_ms = 519 CONFIG_DRM_I915_STOP_TIMEOUT; 520 engine->props.timeslice_duration_ms = 521 CONFIG_DRM_I915_TIMESLICE_DURATION; 522 523 /* 524 * Mid-thread pre-emption is not available in Gen12. Unfortunately, 525 * some compute workloads run quite long threads. That means they get 526 * reset due to not pre-empting in a timely manner. So, bump the 527 * pre-emption timeout value to be much higher for compute engines. 528 */ 529 if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)) 530 engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE; 531 532 /* Cap properties according to any system limits */ 533 #define CLAMP_PROP(field) \ 534 do { \ 535 u64 clamp = intel_clamp_##field(engine, engine->props.field); \ 536 if (clamp != engine->props.field) { \ 537 drm_notice(&engine->i915->drm, \ 538 "Warning, clamping %s to %lld to prevent overflow\n", \ 539 #field, clamp); \ 540 engine->props.field = clamp; \ 541 } \ 542 } while (0) 543 544 CLAMP_PROP(heartbeat_interval_ms); 545 CLAMP_PROP(max_busywait_duration_ns); 546 CLAMP_PROP(preempt_timeout_ms); 547 CLAMP_PROP(stop_timeout_ms); 548 CLAMP_PROP(timeslice_duration_ms); 549 550 #undef CLAMP_PROP 551 552 engine->defaults = engine->props; /* never to change again */ 553 554 engine->context_size = intel_engine_context_size(gt, engine->class); 555 if (WARN_ON(engine->context_size > BIT(20))) 556 engine->context_size = 0; 557 if (engine->context_size) 558 DRIVER_CAPS(i915)->has_logical_contexts = true; 559 560 ewma__engine_latency_init(&engine->latency); 561 562 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); 563 564 /* Scrub mmio state on takeover */ 565 intel_engine_sanitize_mmio(engine); 566 567 gt->engine_class[info->class][info->instance] = engine; 568 gt->engine[id] = engine; 569 570 return 0; 571 } 572 573 u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value) 574 { 575 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 576 577 return value; 578 } 579 580 u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value) 581 { 582 value = min(value, jiffies_to_nsecs(2)); 583 584 return value; 585 } 586 587 u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value) 588 { 589 /* 590 * NB: The GuC API only supports 32bit values. However, the limit is further 591 * reduced due to internal calculations which would otherwise overflow. 
592 */ 593 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) 594 value = min_t(u64, value, guc_policy_max_preempt_timeout_ms()); 595 596 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 597 598 return value; 599 } 600 601 u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value) 602 { 603 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 604 605 return value; 606 } 607 608 u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value) 609 { 610 /* 611 * NB: The GuC API only supports 32bit values. However, the limit is further 612 * reduced due to internal calculations which would otherwise overflow. 613 */ 614 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) 615 value = min_t(u64, value, guc_policy_max_exec_quantum_ms()); 616 617 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 618 619 return value; 620 } 621 622 static void __setup_engine_capabilities(struct intel_engine_cs *engine) 623 { 624 struct drm_i915_private *i915 = engine->i915; 625 626 if (engine->class == VIDEO_DECODE_CLASS) { 627 /* 628 * HEVC support is present on first engine instance 629 * before Gen11 and on all instances afterwards. 630 */ 631 if (GRAPHICS_VER(i915) >= 11 || 632 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0)) 633 engine->uabi_capabilities |= 634 I915_VIDEO_CLASS_CAPABILITY_HEVC; 635 636 /* 637 * SFC block is present only on even logical engine 638 * instances. 639 */ 640 if ((GRAPHICS_VER(i915) >= 11 && 641 (engine->gt->info.vdbox_sfc_access & 642 BIT(engine->instance))) || 643 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0)) 644 engine->uabi_capabilities |= 645 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; 646 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) { 647 if (GRAPHICS_VER(i915) >= 9 && 648 engine->gt->info.sfc_mask & BIT(engine->instance)) 649 engine->uabi_capabilities |= 650 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; 651 } 652 } 653 654 static void intel_setup_engine_capabilities(struct intel_gt *gt) 655 { 656 struct intel_engine_cs *engine; 657 enum intel_engine_id id; 658 659 for_each_engine(engine, gt, id) 660 __setup_engine_capabilities(engine); 661 } 662 663 /** 664 * intel_engines_release() - free the resources allocated for Command Streamers 665 * @gt: pointer to struct intel_gt 666 */ 667 void intel_engines_release(struct intel_gt *gt) 668 { 669 struct intel_engine_cs *engine; 670 enum intel_engine_id id; 671 672 /* 673 * Before we release the resources held by engine, we must be certain 674 * that the HW is no longer accessing them -- having the GPU scribble 675 * to or read from a page being used for something else causes no end 676 * of fun. 677 * 678 * The GPU should be reset by this point, but assume the worst just 679 * in case we aborted before completely initialising the engines. 
	 */
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		if (!engine->release)
			continue;

		intel_wakeref_wait_for_idle(&engine->wakeref);
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));
	}
}

void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{
	if (!engine->request_pool)
		return;

	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Free the requests! dma-resv keeps fences around for an eternity */
	rcu_barrier();

	for_each_engine(engine, gt, id) {
		intel_engine_free_request_pool(engine);
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
			 unsigned int physical_vdbox,
			 unsigned int logical_vdbox, u16 vdbox_mask)
{
	struct drm_i915_private *i915 = gt->i915;

	/*
	 * In Gen11, only even numbered logical VDBOXes are hooked
	 * up to an SFC (Scaler & Format Converter) unit.
	 * In Gen12, even numbered physical instances are always connected
	 * to an SFC. Odd numbered physical instances have an SFC only if
	 * the previous even instance is fused off.
	 *
	 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
	 * in the fuse register that tells us whether a specific SFC is present.
	 */
	if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
		return false;
	else if (MEDIA_VER(i915) >= 12)
		return (physical_vdbox % 2 == 0) ||
			!(BIT(physical_vdbox - 1) & vdbox_mask);
	else if (MEDIA_VER(i915) == 11)
		return logical_vdbox % 2 == 0;

	return false;
}

static void engine_mask_apply_media_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse, fuse1;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (MEDIA_VER(gt->i915) < 11)
		return;

	/*
	 * On newer platforms the fusing register is called 'enable' and has
	 * enable semantics, while on older platforms it is called 'disable'
	 * and bits have disable semantics.
	 */
	media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
	if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
		media_fuse = ~media_fuse;

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;

	if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
		fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
		gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
	} else {
		gt->info.sfc_mask = ~0;
	}

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(gt, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
			continue;
		}

		if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
			gt->info.vdbox_sfc_access |= BIT(i);
		logical_vdbox++;
	}
	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(gt));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(gt, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(gt));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
}

static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
	unsigned long ccs_mask;
	unsigned int i;

	if (GRAPHICS_VER(i915) < 11)
		return;

	if (hweight32(CCS_MASK(gt)) <= 1)
		return;

	ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
						     ss_per_ccs);
	/*
	 * If all DSS in a quadrant are fused off, the corresponding CCS
	 * engine is not available for use.
	 */
	for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
		info->engine_mask &= ~BIT(_CCS(i));
		drm_dbg(&i915->drm, "ccs%u fused off\n", i);
	}
}

static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	unsigned long meml3_mask;
	unsigned long quad;

	if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
	      GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
		return;

	meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
	meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);

	/*
	 * Link Copy engines may be fused off according to meml3_mask. Each
	 * bit is a quad that houses two Link Copy and two Sub Copy engines.
	 */
	for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
		unsigned int instance = quad * 2 + 1;
		intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
						   _BCS(instance));

		if (mask & info->engine_mask) {
			drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
			drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);

			info->engine_mask &= ~mask;
		}
	}
}

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
	struct intel_gt_info *info = &gt->info;

	GEM_BUG_ON(!info->engine_mask);

	engine_mask_apply_media_fuses(gt);
	engine_mask_apply_compute_fuses(gt);
	engine_mask_apply_copy_fuses(gt);

	/*
	 * The only use of the GSC CS is to load and communicate with the GSC
	 * FW, so we have no use for it if we don't have the FW.
	 *
	 * IMPORTANT: in cases where we don't have the GSC FW, we have a
	 * catch-22 situation that breaks media C6 due to 2 requirements:
	 * 1) once turned on, the GSC power well will not go to sleep unless the
	 *    GSC FW is loaded.
	 * 2) to enable idling (which is required for media C6) we need to
	 *    initialize the IDLE_MSG register for the GSC CS and do at least 1
	 *    submission, which will wake up the GSC power well.
	 */
	if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
		drm_notice(&gt->i915->drm,
			   "No GSC FW selected, disabling GSC CS and media C6\n");
		info->engine_mask &= ~BIT(GSC0);
	}

	/*
	 * Do not create the command streamer for CCS slices beyond the first.
	 * All the workloads submitted to the first engine will be shared among
	 * all the slices.
	 *
	 * Once the user is allowed to customize the CCS mode, this check will
	 * need to be removed.
	 */
	if (IS_DG2(gt->i915)) {
		u8 first_ccs = __ffs(CCS_MASK(gt));

		/*
		 * Store the number of active cslices before
		 * changing the CCS engine configuration
		 */
		gt->ccs.cslices = CCS_MASK(gt);

		/* Mask off all the CCS engines */
		info->engine_mask &= ~GENMASK(CCS3, CCS0);
		/* Put back in the first CCS engine */
		info->engine_mask |= BIT(_CCS(first_ccs));
	}

	return info->engine_mask;
}

static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
				 u8 class, const u8 *map, u8 num_instances)
{
	int i, j;
	u8 current_logical_id = 0;

	for (j = 0; j < num_instances; ++j) {
		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			if (!HAS_ENGINE(gt, i) ||
			    intel_engines[i].class != class)
				continue;

			if (intel_engines[i].instance == map[j]) {
				logical_ids[intel_engines[i].instance] =
					current_logical_id++;
				break;
			}
		}
	}
}

static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
	/*
	 * Logical to physical mapping is needed for proper support
	 * of the split-frame feature.
967 */ 968 if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) { 969 const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 }; 970 971 populate_logical_ids(gt, logical_ids, class, 972 map, ARRAY_SIZE(map)); 973 } else { 974 int i; 975 u8 map[MAX_ENGINE_INSTANCE + 1]; 976 977 for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i) 978 map[i] = i; 979 populate_logical_ids(gt, logical_ids, class, 980 map, ARRAY_SIZE(map)); 981 } 982 } 983 984 /** 985 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers 986 * @gt: pointer to struct intel_gt 987 * 988 * Return: non-zero if the initialization failed. 989 */ 990 int intel_engines_init_mmio(struct intel_gt *gt) 991 { 992 struct drm_i915_private *i915 = gt->i915; 993 const unsigned int engine_mask = init_engine_mask(gt); 994 unsigned int mask = 0; 995 unsigned int i, class; 996 u8 logical_ids[MAX_ENGINE_INSTANCE + 1]; 997 int err; 998 999 drm_WARN_ON(&i915->drm, engine_mask == 0); 1000 drm_WARN_ON(&i915->drm, engine_mask & 1001 GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); 1002 1003 if (i915_inject_probe_failure(i915)) 1004 return -ENODEV; 1005 1006 for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) { 1007 setup_logical_ids(gt, logical_ids, class); 1008 1009 for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) { 1010 u8 instance = intel_engines[i].instance; 1011 1012 if (intel_engines[i].class != class || 1013 !HAS_ENGINE(gt, i)) 1014 continue; 1015 1016 err = intel_engine_setup(gt, i, 1017 logical_ids[instance]); 1018 if (err) 1019 goto cleanup; 1020 1021 mask |= BIT(i); 1022 } 1023 } 1024 1025 /* 1026 * Catch failures to update intel_engines table when the new engines 1027 * are added to the driver by a warning and disabling the forgotten 1028 * engines. 1029 */ 1030 if (drm_WARN_ON(&i915->drm, mask != engine_mask)) 1031 gt->info.engine_mask = mask; 1032 1033 gt->info.num_engines = hweight32(mask); 1034 1035 intel_gt_check_and_clear_faults(gt); 1036 1037 intel_setup_engine_capabilities(gt); 1038 1039 intel_uncore_prune_engine_fw_domains(gt->uncore, gt); 1040 1041 return 0; 1042 1043 cleanup: 1044 intel_engines_free(gt); 1045 return err; 1046 } 1047 1048 void intel_engine_init_execlists(struct intel_engine_cs *engine) 1049 { 1050 struct intel_engine_execlists * const execlists = &engine->execlists; 1051 1052 execlists->port_mask = 1; 1053 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); 1054 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); 1055 1056 memset(execlists->pending, 0, sizeof(execlists->pending)); 1057 execlists->active = 1058 memset(execlists->inflight, 0, sizeof(execlists->inflight)); 1059 } 1060 1061 static void cleanup_status_page(struct intel_engine_cs *engine) 1062 { 1063 struct i915_vma *vma; 1064 1065 /* Prevent writes into HWSP after returning the page to the system */ 1066 intel_engine_set_hwsp_writemask(engine, ~0u); 1067 1068 vma = fetch_and_zero(&engine->status_page.vma); 1069 if (!vma) 1070 return; 1071 1072 if (!HWS_NEEDS_PHYSICAL(engine->i915)) 1073 i915_vma_unpin(vma); 1074 1075 i915_gem_object_unpin_map(vma->obj); 1076 i915_gem_object_put(vma->obj); 1077 } 1078 1079 static int pin_ggtt_status_page(struct intel_engine_cs *engine, 1080 struct i915_gem_ww_ctx *ww, 1081 struct i915_vma *vma) 1082 { 1083 unsigned int flags; 1084 1085 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) 1086 /* 1087 * On g33, we cannot place HWS above 256MiB, so 1088 * restrict its pinning to the low mappable arena. 
1089 * Though this restriction is not documented for 1090 * gen4, gen5, or byt, they also behave similarly 1091 * and hang if the HWS is placed at the top of the 1092 * GTT. To generalise, it appears that all !llc 1093 * platforms have issues with us placing the HWS 1094 * above the mappable region (even though we never 1095 * actually map it). 1096 */ 1097 flags = PIN_MAPPABLE; 1098 else 1099 flags = PIN_HIGH; 1100 1101 return i915_ggtt_pin(vma, ww, 0, flags); 1102 } 1103 1104 static int init_status_page(struct intel_engine_cs *engine) 1105 { 1106 struct drm_i915_gem_object *obj; 1107 struct i915_gem_ww_ctx ww; 1108 struct i915_vma *vma; 1109 void *vaddr; 1110 int ret; 1111 1112 INIT_LIST_HEAD(&engine->status_page.timelines); 1113 1114 /* 1115 * Though the HWS register does support 36bit addresses, historically 1116 * we have had hangs and corruption reported due to wild writes if 1117 * the HWS is placed above 4G. We only allow objects to be allocated 1118 * in GFP_DMA32 for i965, and no earlier physical address users had 1119 * access to more than 4G. 1120 */ 1121 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); 1122 if (IS_ERR(obj)) { 1123 drm_err(&engine->i915->drm, 1124 "Failed to allocate status page\n"); 1125 return PTR_ERR(obj); 1126 } 1127 1128 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); 1129 1130 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); 1131 if (IS_ERR(vma)) { 1132 ret = PTR_ERR(vma); 1133 goto err_put; 1134 } 1135 1136 i915_gem_ww_ctx_init(&ww, true); 1137 retry: 1138 ret = i915_gem_object_lock(obj, &ww); 1139 if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915)) 1140 ret = pin_ggtt_status_page(engine, &ww, vma); 1141 if (ret) 1142 goto err; 1143 1144 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 1145 if (IS_ERR(vaddr)) { 1146 ret = PTR_ERR(vaddr); 1147 goto err_unpin; 1148 } 1149 1150 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE); 1151 engine->status_page.vma = vma; 1152 1153 err_unpin: 1154 if (ret) 1155 i915_vma_unpin(vma); 1156 err: 1157 if (ret == -EDEADLK) { 1158 ret = i915_gem_ww_ctx_backoff(&ww); 1159 if (!ret) 1160 goto retry; 1161 } 1162 i915_gem_ww_ctx_fini(&ww); 1163 err_put: 1164 if (ret) 1165 i915_gem_object_put(obj); 1166 return ret; 1167 } 1168 1169 static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine) 1170 { 1171 static const union intel_engine_tlb_inv_reg gen8_regs[] = { 1172 [RENDER_CLASS].reg = GEN8_RTCR, 1173 [VIDEO_DECODE_CLASS].reg = GEN8_M1TCR, /* , GEN8_M2TCR */ 1174 [VIDEO_ENHANCEMENT_CLASS].reg = GEN8_VTCR, 1175 [COPY_ENGINE_CLASS].reg = GEN8_BTCR, 1176 }; 1177 static const union intel_engine_tlb_inv_reg gen12_regs[] = { 1178 [RENDER_CLASS].reg = GEN12_GFX_TLB_INV_CR, 1179 [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR, 1180 [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR, 1181 [COPY_ENGINE_CLASS].reg = GEN12_BLT_TLB_INV_CR, 1182 [COMPUTE_CLASS].reg = GEN12_COMPCTX_TLB_INV_CR, 1183 }; 1184 static const union intel_engine_tlb_inv_reg xehp_regs[] = { 1185 [RENDER_CLASS].mcr_reg = XEHP_GFX_TLB_INV_CR, 1186 [VIDEO_DECODE_CLASS].mcr_reg = XEHP_VD_TLB_INV_CR, 1187 [VIDEO_ENHANCEMENT_CLASS].mcr_reg = XEHP_VE_TLB_INV_CR, 1188 [COPY_ENGINE_CLASS].mcr_reg = XEHP_BLT_TLB_INV_CR, 1189 [COMPUTE_CLASS].mcr_reg = XEHP_COMPCTX_TLB_INV_CR, 1190 }; 1191 static const union intel_engine_tlb_inv_reg xelpmp_regs[] = { 1192 [VIDEO_DECODE_CLASS].reg = GEN12_VD_TLB_INV_CR, 1193 [VIDEO_ENHANCEMENT_CLASS].reg = GEN12_VE_TLB_INV_CR, 1194 [OTHER_CLASS].reg = XELPMP_GSC_TLB_INV_CR, 1195 }; 1196 
struct drm_i915_private *i915 = engine->i915; 1197 const unsigned int instance = engine->instance; 1198 const unsigned int class = engine->class; 1199 const union intel_engine_tlb_inv_reg *regs; 1200 union intel_engine_tlb_inv_reg reg; 1201 unsigned int num = 0; 1202 u32 val; 1203 1204 /* 1205 * New platforms should not be added with catch-all-newer (>=) 1206 * condition so that any later platform added triggers the below warning 1207 * and in turn mandates a human cross-check of whether the invalidation 1208 * flows have compatible semantics. 1209 * 1210 * For instance with the 11.00 -> 12.00 transition three out of five 1211 * respective engine registers were moved to masked type. Then after the 1212 * 12.00 -> 12.50 transition multi cast handling is required too. 1213 */ 1214 1215 if (engine->gt->type == GT_MEDIA) { 1216 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) { 1217 regs = xelpmp_regs; 1218 num = ARRAY_SIZE(xelpmp_regs); 1219 } 1220 } else { 1221 if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || 1222 GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) || 1223 GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) || 1224 GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) { 1225 regs = xehp_regs; 1226 num = ARRAY_SIZE(xehp_regs); 1227 } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) || 1228 GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) { 1229 regs = gen12_regs; 1230 num = ARRAY_SIZE(gen12_regs); 1231 } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) { 1232 regs = gen8_regs; 1233 num = ARRAY_SIZE(gen8_regs); 1234 } else if (GRAPHICS_VER(i915) < 8) { 1235 return 0; 1236 } 1237 } 1238 1239 if (gt_WARN_ONCE(engine->gt, !num, 1240 "Platform does not implement TLB invalidation!")) 1241 return -ENODEV; 1242 1243 if (gt_WARN_ON_ONCE(engine->gt, 1244 class >= num || 1245 (!regs[class].reg.reg && 1246 !regs[class].mcr_reg.reg))) 1247 return -ERANGE; 1248 1249 reg = regs[class]; 1250 1251 if (regs == xelpmp_regs && class == OTHER_CLASS) { 1252 /* 1253 * There's only a single GSC instance, but it uses register bit 1254 * 1 instead of either 0 or OTHER_GSC_INSTANCE. 
1255 */ 1256 GEM_WARN_ON(instance != OTHER_GSC_INSTANCE); 1257 val = 1; 1258 } else if (regs == gen8_regs && class == VIDEO_DECODE_CLASS && instance == 1) { 1259 reg.reg = GEN8_M2TCR; 1260 val = 0; 1261 } else { 1262 val = instance; 1263 } 1264 1265 val = BIT(val); 1266 1267 engine->tlb_inv.mcr = regs == xehp_regs; 1268 engine->tlb_inv.reg = reg; 1269 engine->tlb_inv.done = val; 1270 1271 if (GRAPHICS_VER(i915) >= 12 && 1272 (engine->class == VIDEO_DECODE_CLASS || 1273 engine->class == VIDEO_ENHANCEMENT_CLASS || 1274 engine->class == COMPUTE_CLASS || 1275 engine->class == OTHER_CLASS)) 1276 engine->tlb_inv.request = _MASKED_BIT_ENABLE(val); 1277 else 1278 engine->tlb_inv.request = val; 1279 1280 return 0; 1281 } 1282 1283 static int engine_setup_common(struct intel_engine_cs *engine) 1284 { 1285 int err; 1286 1287 init_llist_head(&engine->barrier_tasks); 1288 1289 err = intel_engine_init_tlb_invalidation(engine); 1290 if (err) 1291 return err; 1292 1293 err = init_status_page(engine); 1294 if (err) 1295 return err; 1296 1297 engine->breadcrumbs = intel_breadcrumbs_create(engine); 1298 if (!engine->breadcrumbs) { 1299 err = -ENOMEM; 1300 goto err_status; 1301 } 1302 1303 engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL); 1304 if (!engine->sched_engine) { 1305 err = -ENOMEM; 1306 goto err_sched_engine; 1307 } 1308 engine->sched_engine->private_data = engine; 1309 1310 err = intel_engine_init_cmd_parser(engine); 1311 if (err) 1312 goto err_cmd_parser; 1313 1314 intel_engine_init_execlists(engine); 1315 intel_engine_init__pm(engine); 1316 intel_engine_init_retire(engine); 1317 1318 /* Use the whole device by default */ 1319 engine->sseu = 1320 intel_sseu_from_device_info(&engine->gt->info.sseu); 1321 1322 intel_engine_init_workarounds(engine); 1323 intel_engine_init_whitelist(engine); 1324 intel_engine_init_ctx_wa(engine); 1325 1326 if (GRAPHICS_VER(engine->i915) >= 12) 1327 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; 1328 1329 return 0; 1330 1331 err_cmd_parser: 1332 i915_sched_engine_put(engine->sched_engine); 1333 err_sched_engine: 1334 intel_breadcrumbs_put(engine->breadcrumbs); 1335 err_status: 1336 cleanup_status_page(engine); 1337 return err; 1338 } 1339 1340 struct measure_breadcrumb { 1341 struct i915_request rq; 1342 struct intel_ring ring; 1343 u32 cs[2048]; 1344 }; 1345 1346 static int measure_breadcrumb_dw(struct intel_context *ce) 1347 { 1348 struct intel_engine_cs *engine = ce->engine; 1349 struct measure_breadcrumb *frame; 1350 int dw; 1351 1352 GEM_BUG_ON(!engine->gt->scratch); 1353 1354 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 1355 if (!frame) 1356 return -ENOMEM; 1357 1358 frame->rq.i915 = engine->i915; 1359 frame->rq.engine = engine; 1360 frame->rq.context = ce; 1361 rcu_assign_pointer(frame->rq.timeline, ce->timeline); 1362 frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno; 1363 1364 frame->ring.vaddr = frame->cs; 1365 frame->ring.size = sizeof(frame->cs); 1366 frame->ring.wrap = 1367 BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size); 1368 frame->ring.effective_size = frame->ring.size; 1369 intel_ring_update_space(&frame->ring); 1370 frame->rq.ring = &frame->ring; 1371 1372 mutex_lock(&ce->timeline->mutex); 1373 spin_lock_irq(&engine->sched_engine->lock); 1374 1375 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; 1376 1377 spin_unlock_irq(&engine->sched_engine->lock); 1378 mutex_unlock(&ce->timeline->mutex); 1379 1380 GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ 1381 1382 kfree(frame); 1383 return dw; 1384 } 
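
/**
 * intel_engine_create_pinned_context() - create a perma-pinned context
 * @engine: the engine the context will be tied to
 * @vm: the address space the context should use
 * @ring_size: size of the ring to allocate, in bytes
 * @hwsp: offset into the engine's status page to use for the timeline seqno
 * @key: lockdep class to assign to the context's timeline mutex
 * @name: lockdep name for the context's timeline mutex
 *
 * Create a context for internal driver use (such as the engine's
 * kernel_context) and pin it for its whole lifetime, so that it is always
 * available without having to pin it around each use. The context must be
 * released with intel_engine_destroy_pinned_context().
 *
 * Return: the new context, or an ERR_PTR() on failure.
 */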
struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
	ce->timeline = page_pack_bits(NULL, hwsp);
	ce->ring = NULL;
	ce->ring_size = ring_size;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);

	return ce;
}

void intel_engine_destroy_pinned_context(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *hwsp = engine->status_page.vma;

	GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);

	mutex_lock(&hwsp->vm->mutex);
	list_del(&ce->timeline->engine_link);
	mutex_unlock(&hwsp->vm->mutex);

	list_del(&ce->pinned_contexts_link);
	intel_context_unpin(ce);
	intel_context_put(ce);
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;

	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						  I915_GEM_HWS_SEQNO_ADDR,
						  &kernel, "kernel_context");
}

/*
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;

	return 0;

err_context:
	intel_engine_destroy_pinned_context(ce);
	return ret;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (intel_uc_uses_guc_submission(&gt->uc)) {
		gt->submission_method = INTEL_SUBMISSION_GUC;
		setup = intel_guc_submission_setup;
	} else if (HAS_EXECLISTS(gt->i915)) {
		gt->submission_method = INTEL_SUBMISSION_ELSP;
		setup = intel_execlists_submission_setup;
	} else {
		gt->submission_method = INTEL_SUBMISSION_RING;
		setup = intel_ring_submission_setup;
	}

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err) {
			intel_engine_cleanup_common(engine);
			return err;
		}

		/* The backend should now be responsible for cleanup */
		GEM_BUG_ON(engine->release == NULL);

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_engine_fini_retire(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		fput(engine->default_state);

	if (engine->kernel_context)
		intel_engine_destroy_pinned_context(engine->kernel_context);

	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
	cleanup_status_page(engine);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
1576 */ 1577 int intel_engine_resume(struct intel_engine_cs *engine) 1578 { 1579 intel_engine_apply_workarounds(engine); 1580 intel_engine_apply_whitelist(engine); 1581 1582 return engine->resume(engine); 1583 } 1584 1585 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) 1586 { 1587 struct drm_i915_private *i915 = engine->i915; 1588 1589 u64 acthd; 1590 1591 if (GRAPHICS_VER(i915) >= 8) 1592 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW); 1593 else if (GRAPHICS_VER(i915) >= 4) 1594 acthd = ENGINE_READ(engine, RING_ACTHD); 1595 else 1596 acthd = ENGINE_READ(engine, ACTHD); 1597 1598 return acthd; 1599 } 1600 1601 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) 1602 { 1603 u64 bbaddr; 1604 1605 if (GRAPHICS_VER(engine->i915) >= 8) 1606 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW); 1607 else 1608 bbaddr = ENGINE_READ(engine, RING_BBADDR); 1609 1610 return bbaddr; 1611 } 1612 1613 static unsigned long stop_timeout(const struct intel_engine_cs *engine) 1614 { 1615 if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */ 1616 return 0; 1617 1618 /* 1619 * If we are doing a normal GPU reset, we can take our time and allow 1620 * the engine to quiesce. We've stopped submission to the engine, and 1621 * if we wait long enough an innocent context should complete and 1622 * leave the engine idle. So they should not be caught unaware by 1623 * the forthcoming GPU reset (which usually follows the stop_cs)! 1624 */ 1625 return READ_ONCE(engine->props.stop_timeout_ms); 1626 } 1627 1628 static int __intel_engine_stop_cs(struct intel_engine_cs *engine, 1629 int fast_timeout_us, 1630 int slow_timeout_ms) 1631 { 1632 struct intel_uncore *uncore = engine->uncore; 1633 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base); 1634 int err; 1635 1636 intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); 1637 1638 /* 1639 * Wa_22011802037: Prior to doing a reset, ensure CS is 1640 * stopped, set ring stop bit and prefetch disable bit to halt CS 1641 */ 1642 if (intel_engine_reset_needs_wa_22011802037(engine->gt)) 1643 intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base), 1644 _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE)); 1645 1646 err = __intel_wait_for_register_fw(engine->uncore, mode, 1647 MODE_IDLE, MODE_IDLE, 1648 fast_timeout_us, 1649 slow_timeout_ms, 1650 NULL); 1651 1652 /* A final mmio read to let GPU writes be hopefully flushed to memory */ 1653 intel_uncore_posting_read_fw(uncore, mode); 1654 return err; 1655 } 1656 1657 int intel_engine_stop_cs(struct intel_engine_cs *engine) 1658 { 1659 int err = 0; 1660 1661 if (GRAPHICS_VER(engine->i915) < 3) 1662 return -ENODEV; 1663 1664 ENGINE_TRACE(engine, "\n"); 1665 /* 1666 * TODO: Find out why occasionally stopping the CS times out. Seen 1667 * especially with gem_eio tests. 1668 * 1669 * Occasionally trying to stop the cs times out, but does not adversely 1670 * affect functionality. The timeout is set as a config parameter that 1671 * defaults to 100ms. In most cases the follow up operation is to wait 1672 * for pending MI_FORCE_WAKES. The assumption is that this timeout is 1673 * sufficient for any pending MI_FORCEWAKEs to complete. Once root 1674 * caused, the caller must check and handle the return from this 1675 * function. 
1676 */ 1677 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) { 1678 ENGINE_TRACE(engine, 1679 "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n", 1680 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR, 1681 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR); 1682 1683 /* 1684 * Sometimes we observe that the idle flag is not 1685 * set even though the ring is empty. So double 1686 * check before giving up. 1687 */ 1688 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) != 1689 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR)) 1690 err = -ETIMEDOUT; 1691 } 1692 1693 return err; 1694 } 1695 1696 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) 1697 { 1698 ENGINE_TRACE(engine, "\n"); 1699 1700 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 1701 } 1702 1703 static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) 1704 { 1705 static const i915_reg_t _reg[I915_NUM_ENGINES] = { 1706 [RCS0] = MSG_IDLE_CS, 1707 [BCS0] = MSG_IDLE_BCS, 1708 [VCS0] = MSG_IDLE_VCS0, 1709 [VCS1] = MSG_IDLE_VCS1, 1710 [VCS2] = MSG_IDLE_VCS2, 1711 [VCS3] = MSG_IDLE_VCS3, 1712 [VCS4] = MSG_IDLE_VCS4, 1713 [VCS5] = MSG_IDLE_VCS5, 1714 [VCS6] = MSG_IDLE_VCS6, 1715 [VCS7] = MSG_IDLE_VCS7, 1716 [VECS0] = MSG_IDLE_VECS0, 1717 [VECS1] = MSG_IDLE_VECS1, 1718 [VECS2] = MSG_IDLE_VECS2, 1719 [VECS3] = MSG_IDLE_VECS3, 1720 [CCS0] = MSG_IDLE_CS, 1721 [CCS1] = MSG_IDLE_CS, 1722 [CCS2] = MSG_IDLE_CS, 1723 [CCS3] = MSG_IDLE_CS, 1724 }; 1725 u32 val; 1726 1727 if (!_reg[engine->id].reg) 1728 return 0; 1729 1730 val = intel_uncore_read(engine->uncore, _reg[engine->id]); 1731 1732 /* bits[29:25] & bits[13:9] >> shift */ 1733 return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; 1734 } 1735 1736 static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) 1737 { 1738 int ret; 1739 1740 /* Ensure GPM receives fw up/down after CS is stopped */ 1741 udelay(1); 1742 1743 /* Wait for forcewake request to complete in GPM */ 1744 ret = __intel_wait_for_register_fw(gt->uncore, 1745 GEN9_PWRGT_DOMAIN_STATUS, 1746 fw_mask, fw_mask, 5000, 0, NULL); 1747 1748 /* Ensure CS receives fw ack from GPM */ 1749 udelay(1); 1750 1751 if (ret) 1752 GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); 1753 } 1754 1755 /* 1756 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any 1757 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The 1758 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the 1759 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we 1760 * are concerned only with the gt reset here, we use a logical OR of pending 1761 * forcewakeups from all reset domains and then wait for them to complete by 1762 * querying PWRGT_DOMAIN_STATUS. 
1763 */ 1764 void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine) 1765 { 1766 u32 fw_pending = __cs_pending_mi_force_wakes(engine); 1767 1768 if (fw_pending) 1769 __gpm_wait_for_fw_complete(engine->gt, fw_pending); 1770 } 1771 1772 /* NB: please notice the memset */ 1773 void intel_engine_get_instdone(const struct intel_engine_cs *engine, 1774 struct intel_instdone *instdone) 1775 { 1776 struct drm_i915_private *i915 = engine->i915; 1777 struct intel_uncore *uncore = engine->uncore; 1778 u32 mmio_base = engine->mmio_base; 1779 int slice; 1780 int subslice; 1781 int iter; 1782 1783 memset(instdone, 0, sizeof(*instdone)); 1784 1785 if (GRAPHICS_VER(i915) >= 8) { 1786 instdone->instdone = 1787 intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); 1788 1789 if (engine->id != RCS0) 1790 return; 1791 1792 instdone->slice_common = 1793 intel_uncore_read(uncore, GEN7_SC_INSTDONE); 1794 if (GRAPHICS_VER(i915) >= 12) { 1795 instdone->slice_common_extra[0] = 1796 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA); 1797 instdone->slice_common_extra[1] = 1798 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2); 1799 } 1800 1801 for_each_ss_steering(iter, engine->gt, slice, subslice) { 1802 instdone->sampler[slice][subslice] = 1803 intel_gt_mcr_read(engine->gt, 1804 GEN8_SAMPLER_INSTDONE, 1805 slice, subslice); 1806 instdone->row[slice][subslice] = 1807 intel_gt_mcr_read(engine->gt, 1808 GEN8_ROW_INSTDONE, 1809 slice, subslice); 1810 } 1811 1812 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { 1813 for_each_ss_steering(iter, engine->gt, slice, subslice) 1814 instdone->geom_svg[slice][subslice] = 1815 intel_gt_mcr_read(engine->gt, 1816 XEHPG_INSTDONE_GEOM_SVG, 1817 slice, subslice); 1818 } 1819 } else if (GRAPHICS_VER(i915) >= 7) { 1820 instdone->instdone = 1821 intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); 1822 1823 if (engine->id != RCS0) 1824 return; 1825 1826 instdone->slice_common = 1827 intel_uncore_read(uncore, GEN7_SC_INSTDONE); 1828 instdone->sampler[0][0] = 1829 intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE); 1830 instdone->row[0][0] = 1831 intel_uncore_read(uncore, GEN7_ROW_INSTDONE); 1832 } else if (GRAPHICS_VER(i915) >= 4) { 1833 instdone->instdone = 1834 intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); 1835 if (engine->id == RCS0) 1836 /* HACK: Using the wrong struct member */ 1837 instdone->slice_common = 1838 intel_uncore_read(uncore, GEN4_INSTDONE1); 1839 } else { 1840 instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE); 1841 } 1842 } 1843 1844 static bool ring_is_idle(struct intel_engine_cs *engine) 1845 { 1846 bool idle = true; 1847 1848 if (I915_SELFTEST_ONLY(!engine->mmio_base)) 1849 return true; 1850 1851 if (!intel_engine_pm_get_if_awake(engine)) 1852 return true; 1853 1854 /* First check that no commands are left in the ring */ 1855 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) != 1856 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR)) 1857 idle = false; 1858 1859 /* No bit for gen2, so assume the CS parser is idle */ 1860 if (GRAPHICS_VER(engine->i915) > 2 && 1861 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) 1862 idle = false; 1863 1864 intel_engine_pm_put(engine); 1865 1866 return idle; 1867 } 1868 1869 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync) 1870 { 1871 struct tasklet_struct *t = &engine->sched_engine->tasklet; 1872 1873 if (!t->callback) 1874 return; 1875 1876 local_bh_disable(); 1877 if (tasklet_trylock(t)) { 1878 /* Must wait for any GPU reset in progress. 
*/ 1879 if (__tasklet_is_enabled(t)) 1880 t->callback(t); 1881 tasklet_unlock(t); 1882 } 1883 local_bh_enable(); 1884 1885 /* Synchronise and wait for the tasklet on another CPU */ 1886 if (sync) 1887 tasklet_unlock_wait(t); 1888 } 1889 1890 /** 1891 * intel_engine_is_idle() - Report if the engine has finished process all work 1892 * @engine: the intel_engine_cs 1893 * 1894 * Return true if there are no requests pending, nothing left to be submitted 1895 * to hardware, and that the engine is idle. 1896 */ 1897 bool intel_engine_is_idle(struct intel_engine_cs *engine) 1898 { 1899 /* More white lies, if wedged, hw state is inconsistent */ 1900 if (intel_gt_is_wedged(engine->gt)) 1901 return true; 1902 1903 if (!intel_engine_pm_is_awake(engine)) 1904 return true; 1905 1906 /* Waiting to drain ELSP? */ 1907 intel_synchronize_hardirq(engine->i915); 1908 intel_engine_flush_submission(engine); 1909 1910 /* ELSP is empty, but there are ready requests? E.g. after reset */ 1911 if (!i915_sched_engine_is_empty(engine->sched_engine)) 1912 return false; 1913 1914 /* Ring stopped? */ 1915 return ring_is_idle(engine); 1916 } 1917 1918 bool intel_engines_are_idle(struct intel_gt *gt) 1919 { 1920 struct intel_engine_cs *engine; 1921 enum intel_engine_id id; 1922 1923 /* 1924 * If the driver is wedged, HW state may be very inconsistent and 1925 * report that it is still busy, even though we have stopped using it. 1926 */ 1927 if (intel_gt_is_wedged(gt)) 1928 return true; 1929 1930 /* Already parked (and passed an idleness test); must still be idle */ 1931 if (!READ_ONCE(gt->awake)) 1932 return true; 1933 1934 for_each_engine(engine, gt, id) { 1935 if (!intel_engine_is_idle(engine)) 1936 return false; 1937 } 1938 1939 return true; 1940 } 1941 1942 bool intel_engine_irq_enable(struct intel_engine_cs *engine) 1943 { 1944 if (!engine->irq_enable) 1945 return false; 1946 1947 /* Caller disables interrupts */ 1948 spin_lock(engine->gt->irq_lock); 1949 engine->irq_enable(engine); 1950 spin_unlock(engine->gt->irq_lock); 1951 1952 return true; 1953 } 1954 1955 void intel_engine_irq_disable(struct intel_engine_cs *engine) 1956 { 1957 if (!engine->irq_disable) 1958 return; 1959 1960 /* Caller disables interrupts */ 1961 spin_lock(engine->gt->irq_lock); 1962 engine->irq_disable(engine); 1963 spin_unlock(engine->gt->irq_lock); 1964 } 1965 1966 void intel_engines_reset_default_submission(struct intel_gt *gt) 1967 { 1968 struct intel_engine_cs *engine; 1969 enum intel_engine_id id; 1970 1971 for_each_engine(engine, gt, id) { 1972 if (engine->sanitize) 1973 engine->sanitize(engine); 1974 1975 engine->set_default_submission(engine); 1976 } 1977 } 1978 1979 bool intel_engine_can_store_dword(struct intel_engine_cs *engine) 1980 { 1981 switch (GRAPHICS_VER(engine->i915)) { 1982 case 2: 1983 return false; /* uses physical not virtual addresses */ 1984 case 3: 1985 /* maybe only uses physical not virtual addresses */ 1986 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); 1987 case 4: 1988 return !IS_I965G(engine->i915); /* who knows! */ 1989 case 6: 1990 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ 1991 default: 1992 return true; 1993 } 1994 } 1995 1996 static struct intel_timeline *get_timeline(struct i915_request *rq) 1997 { 1998 struct intel_timeline *tl; 1999 2000 /* 2001 * Even though we are holding the engine->sched_engine->lock here, there 2002 * is no control over the submission queue per-se and we are 2003 * inspecting the active state at a random point in time, with an 2004 * unknown queue. 
void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->sanitize)
			engine->sanitize(engine);

		engine->set_default_submission(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (GRAPHICS_VER(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->sched_engine->lock here,
	 * there is no control over the submission queue per se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static int print_ring(char *buf, int sz, struct i915_request *rq)
{
	int len = 0;

	if (!i915_request_signaled(rq)) {
		struct intel_timeline *tl = get_timeline(rq);

		len = scnprintf(buf, sz,
				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
				i915_ggtt_offset(rq->ring->vma),
				tl ? tl->hwsp_offset : 0,
				hwsp_seqno(rq),
				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
						      1000 * 1000));

		if (tl)
			intel_timeline_put(tl);
	}

	return len;
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static const char *repr_timer(const struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return "inactive";

	if (timer_pending(t))
		return "active";

	return "expired";
}
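
/*
 * Dump the engine's MMIO state (ring registers, error registers and, for
 * execlists, the execlist status/CSB state) to @m. Only called from
 * intel_engine_dump() with a runtime PM wakeref held, so the registers can
 * be read without waking the device.
 */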
" [idle]" : ""); 2107 } 2108 2109 if (GRAPHICS_VER(i915) >= 6) { 2110 drm_printf(m, "\tRING_IMR: 0x%08x\n", 2111 ENGINE_READ(engine, RING_IMR)); 2112 drm_printf(m, "\tRING_ESR: 0x%08x\n", 2113 ENGINE_READ(engine, RING_ESR)); 2114 drm_printf(m, "\tRING_EMR: 0x%08x\n", 2115 ENGINE_READ(engine, RING_EMR)); 2116 drm_printf(m, "\tRING_EIR: 0x%08x\n", 2117 ENGINE_READ(engine, RING_EIR)); 2118 } 2119 2120 addr = intel_engine_get_active_head(engine); 2121 drm_printf(m, "\tACTHD: 0x%08x_%08x\n", 2122 upper_32_bits(addr), lower_32_bits(addr)); 2123 addr = intel_engine_get_last_batch_head(engine); 2124 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", 2125 upper_32_bits(addr), lower_32_bits(addr)); 2126 if (GRAPHICS_VER(i915) >= 8) 2127 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); 2128 else if (GRAPHICS_VER(i915) >= 4) 2129 addr = ENGINE_READ(engine, RING_DMA_FADD); 2130 else 2131 addr = ENGINE_READ(engine, DMA_FADD_I8XX); 2132 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", 2133 upper_32_bits(addr), lower_32_bits(addr)); 2134 if (GRAPHICS_VER(i915) >= 4) { 2135 drm_printf(m, "\tIPEIR: 0x%08x\n", 2136 ENGINE_READ(engine, RING_IPEIR)); 2137 drm_printf(m, "\tIPEHR: 0x%08x\n", 2138 ENGINE_READ(engine, RING_IPEHR)); 2139 } else { 2140 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); 2141 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); 2142 } 2143 2144 if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) { 2145 struct i915_request * const *port, *rq; 2146 const u32 *hws = 2147 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 2148 const u8 num_entries = execlists->csb_size; 2149 unsigned int idx; 2150 u8 read, write; 2151 2152 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n", 2153 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)), 2154 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)), 2155 repr_timer(&engine->execlists.preempt), 2156 repr_timer(&engine->execlists.timer)); 2157 2158 read = execlists->csb_head; 2159 write = READ_ONCE(*execlists->csb_write); 2160 2161 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n", 2162 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), 2163 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), 2164 read, write, num_entries); 2165 2166 if (read >= num_entries) 2167 read = 0; 2168 if (write >= num_entries) 2169 write = 0; 2170 if (read > write) 2171 write += num_entries; 2172 while (read < write) { 2173 idx = ++read % num_entries; 2174 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", 2175 idx, hws[idx * 2], hws[idx * 2 + 1]); 2176 } 2177 2178 i915_sched_engine_active_lock_bh(engine->sched_engine); 2179 rcu_read_lock(); 2180 for (port = execlists->active; (rq = *port); port++) { 2181 char hdr[160]; 2182 int len; 2183 2184 len = scnprintf(hdr, sizeof(hdr), 2185 "\t\tActive[%d]: ccid:%08x%s%s, ", 2186 (int)(port - execlists->active), 2187 rq->context->lrc.ccid, 2188 intel_context_is_closed(rq->context) ? "!" : "", 2189 intel_context_is_banned(rq->context) ? "*" : ""); 2190 len += print_ring(hdr + len, sizeof(hdr) - len, rq); 2191 scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); 2192 i915_request_show(m, rq, hdr, 0); 2193 } 2194 for (port = execlists->pending; (rq = *port); port++) { 2195 char hdr[160]; 2196 int len; 2197 2198 len = scnprintf(hdr, sizeof(hdr), 2199 "\t\tPending[%d]: ccid:%08x%s%s, ", 2200 (int)(port - execlists->pending), 2201 rq->context->lrc.ccid, 2202 intel_context_is_closed(rq->context) ? "!" 
: "", 2203 intel_context_is_banned(rq->context) ? "*" : ""); 2204 len += print_ring(hdr + len, sizeof(hdr) - len, rq); 2205 scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); 2206 i915_request_show(m, rq, hdr, 0); 2207 } 2208 rcu_read_unlock(); 2209 i915_sched_engine_active_unlock_bh(engine->sched_engine); 2210 } else if (GRAPHICS_VER(i915) > 6) { 2211 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 2212 ENGINE_READ(engine, RING_PP_DIR_BASE)); 2213 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", 2214 ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); 2215 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 2216 ENGINE_READ(engine, RING_PP_DIR_DCLV)); 2217 } 2218 } 2219 2220 static void print_request_ring(struct drm_printer *m, struct i915_request *rq) 2221 { 2222 struct i915_vma_resource *vma_res = rq->batch_res; 2223 void *ring; 2224 int size; 2225 2226 drm_printf(m, 2227 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", 2228 rq->head, rq->postfix, rq->tail, 2229 vma_res ? upper_32_bits(vma_res->start) : ~0u, 2230 vma_res ? lower_32_bits(vma_res->start) : ~0u); 2231 2232 size = rq->tail - rq->head; 2233 if (rq->tail < rq->head) 2234 size += rq->ring->size; 2235 2236 ring = kmalloc(size, GFP_ATOMIC); 2237 if (ring) { 2238 const void *vaddr = rq->ring->vaddr; 2239 unsigned int head = rq->head; 2240 unsigned int len = 0; 2241 2242 if (rq->tail < head) { 2243 len = rq->ring->size - head; 2244 memcpy(ring, vaddr + head, len); 2245 head = 0; 2246 } 2247 memcpy(ring + len, vaddr + head, size - len); 2248 2249 hexdump(m, ring, size); 2250 kfree(ring); 2251 } 2252 } 2253 2254 static unsigned long read_ul(void *p, size_t x) 2255 { 2256 return *(unsigned long *)(p + x); 2257 } 2258 2259 static void print_properties(struct intel_engine_cs *engine, 2260 struct drm_printer *m) 2261 { 2262 static const struct pmap { 2263 size_t offset; 2264 const char *name; 2265 } props[] = { 2266 #define P(x) { \ 2267 .offset = offsetof(typeof(engine->props), x), \ 2268 .name = #x \ 2269 } 2270 P(heartbeat_interval_ms), 2271 P(max_busywait_duration_ns), 2272 P(preempt_timeout_ms), 2273 P(stop_timeout_ms), 2274 P(timeslice_duration_ms), 2275 2276 {}, 2277 #undef P 2278 }; 2279 const struct pmap *p; 2280 2281 drm_printf(m, "\tProperties:\n"); 2282 for (p = props; p->name; p++) 2283 drm_printf(m, "\t\t%s: %lu [default %lu]\n", 2284 p->name, 2285 read_ul(&engine->props, p->offset), 2286 read_ul(&engine->defaults, p->offset)); 2287 } 2288 2289 static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg) 2290 { 2291 struct intel_timeline *tl = get_timeline(rq); 2292 2293 i915_request_show(m, rq, msg, 0); 2294 2295 drm_printf(m, "\t\tring->start: 0x%08x\n", 2296 i915_ggtt_offset(rq->ring->vma)); 2297 drm_printf(m, "\t\tring->head: 0x%08x\n", 2298 rq->ring->head); 2299 drm_printf(m, "\t\tring->tail: 0x%08x\n", 2300 rq->ring->tail); 2301 drm_printf(m, "\t\tring->emit: 0x%08x\n", 2302 rq->ring->emit); 2303 drm_printf(m, "\t\tring->space: 0x%08x\n", 2304 rq->ring->space); 2305 2306 if (tl) { 2307 drm_printf(m, "\t\tring->hwsp: 0x%08x\n", 2308 tl->hwsp_offset); 2309 intel_timeline_put(tl); 2310 } 2311 2312 print_request_ring(m, rq); 2313 2314 if (rq->context->lrc_reg_state) { 2315 drm_printf(m, "Logical Ring Context:\n"); 2316 hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE); 2317 } 2318 } 2319 2320 void intel_engine_dump_active_requests(struct list_head *requests, 2321 struct i915_request *hung_rq, 2322 struct drm_printer *m) 2323 { 2324 struct i915_request *rq; 2325 const char *msg; 2326 enum 
void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m)
{
	struct i915_request *rq;
	const char *msg;
	enum i915_request_state state;

	list_for_each_entry(rq, requests, sched.link) {
		if (rq == hung_rq)
			continue;

		state = i915_test_request_state(rq);
		if (state < I915_REQUEST_QUEUED)
			continue;

		if (state == I915_REQUEST_ACTIVE)
			msg = "\t\tactive on engine";
		else
			msg = "\t\tactive in queue";

		engine_dump_request(rq, m, msg);
	}
}

static void engine_dump_active_requests(struct intel_engine_cs *engine,
					struct drm_printer *m)
{
	struct intel_context *hung_ce = NULL;
	struct i915_request *hung_rq = NULL;

	/*
	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
	 * The GPU is still running so requests are still executing and any
	 * hardware reads will be out of date by the time they are reported.
	 * But the intention here is just to report an instantaneous snapshot
	 * so that's fine.
	 */
	intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);

	drm_printf(m, "\tRequests:\n");

	if (hung_rq)
		engine_dump_request(hung_rq, m, "\t\thung");
	else if (hung_ce)
		drm_printf(m, "\t\tGot hung ce but no hung rq!\n");

	if (intel_uc_uses_guc_submission(&engine->gt->uc))
		intel_guc_dump_active_requests(engine, hung_rq, m);
	else
		intel_execlists_dump_active_requests(engine, hung_rq, m);

	if (hung_rq)
		i915_request_put(hung_rq);
}
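
/**
 * intel_engine_dump() - Dump the current state of an engine for debugging
 * @engine: the engine to dump
 * @m: the drm_printer to which the dump is written
 * @header: optional printf-style header, followed by its format arguments
 *
 * Print a snapshot of @engine's software state (wakerefs, heartbeat, reset
 * counts, properties, pending requests) and, if the device is awake, its
 * register state, followed by the HWSP contents and breadcrumbs.
 */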
%s\n", str_yes_no(intel_engine_is_idle(engine))); 2436 2437 intel_engine_print_breadcrumbs(engine, m); 2438 } 2439 2440 /** 2441 * intel_engine_get_busy_time() - Return current accumulated engine busyness 2442 * @engine: engine to report on 2443 * @now: monotonic timestamp of sampling 2444 * 2445 * Returns accumulated time @engine was busy since engine stats were enabled. 2446 */ 2447 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) 2448 { 2449 return engine->busyness(engine, now); 2450 } 2451 2452 struct intel_context * 2453 intel_engine_create_virtual(struct intel_engine_cs **siblings, 2454 unsigned int count, unsigned long flags) 2455 { 2456 if (count == 0) 2457 return ERR_PTR(-EINVAL); 2458 2459 if (count == 1 && !(flags & FORCE_VIRTUAL)) 2460 return intel_context_create(siblings[0]); 2461 2462 GEM_BUG_ON(!siblings[0]->cops->create_virtual); 2463 return siblings[0]->cops->create_virtual(siblings, count, flags); 2464 } 2465 2466 static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine) 2467 { 2468 struct i915_request *request, *active = NULL; 2469 2470 /* 2471 * This search does not work in GuC submission mode. However, the GuC 2472 * will report the hanging context directly to the driver itself. So 2473 * the driver should never get here when in GuC mode. 2474 */ 2475 GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc)); 2476 2477 /* 2478 * We are called by the error capture, reset and to dump engine 2479 * state at random points in time. In particular, note that neither is 2480 * crucially ordered with an interrupt. After a hang, the GPU is dead 2481 * and we assume that no more writes can happen (we waited long enough 2482 * for all writes that were in transaction to be flushed) - adding an 2483 * extra delay for a recent interrupt is pointless. Hence, we do 2484 * not need an engine->irq_seqno_barrier() before the seqno reads. 2485 * At all other times, we must assume the GPU is still running, but 2486 * we only care about the snapshot of this moment. 2487 */ 2488 lockdep_assert_held(&engine->sched_engine->lock); 2489 2490 rcu_read_lock(); 2491 request = execlists_active(&engine->execlists); 2492 if (request) { 2493 struct intel_timeline *tl = request->context->timeline; 2494 2495 list_for_each_entry_from_reverse(request, &tl->requests, link) { 2496 if (__i915_request_is_complete(request)) 2497 break; 2498 2499 active = request; 2500 } 2501 } 2502 rcu_read_unlock(); 2503 if (active) 2504 return active; 2505 2506 list_for_each_entry(request, &engine->sched_engine->requests, 2507 sched.link) { 2508 if (i915_test_request_state(request) != I915_REQUEST_ACTIVE) 2509 continue; 2510 2511 active = request; 2512 break; 2513 } 2514 2515 return active; 2516 } 2517 2518 void intel_engine_get_hung_entity(struct intel_engine_cs *engine, 2519 struct intel_context **ce, struct i915_request **rq) 2520 { 2521 unsigned long flags; 2522 2523 *ce = intel_engine_get_hung_context(engine); 2524 if (*ce) { 2525 intel_engine_clear_hung_context(engine); 2526 2527 *rq = intel_context_get_active_request(*ce); 2528 return; 2529 } 2530 2531 /* 2532 * Getting here with GuC enabled means it is a forced error capture 2533 * with no actual hang. So, no need to attempt the execlist search. 
void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
				  struct intel_context **ce, struct i915_request **rq)
{
	unsigned long flags;

	*ce = intel_engine_get_hung_context(engine);
	if (*ce) {
		intel_engine_clear_hung_context(engine);

		*rq = intel_context_get_active_request(*ce);
		return;
	}

	/*
	 * Getting here with GuC enabled means it is a forced error capture
	 * with no actual hang. So, no need to attempt the execlist search.
	 */
	if (intel_uc_uses_guc_submission(&engine->gt->uc))
		return;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	*rq = engine_execlist_find_hung_request(engine);
	if (*rq)
		*rq = i915_request_get_rcu(*rq);
	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
	/*
	 * If there are any non-fused-off CCS engines, we need to enable CCS
	 * support in the RCU_MODE register. This only needs to be done once,
	 * so for simplicity we'll take care of this in the RCS engine's
	 * resume handler; since the RCS and all CCS engines belong to the
	 * same reset domain and are reset together, this will also take care
	 * of re-applying the setting after i915-triggered resets.
	 */
	if (!CCS_MASK(engine->gt))
		return;

	intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
			   _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif