1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2016 Intel Corporation 4 */ 5 6 #include <linux/string_helpers.h> 7 8 #include <drm/drm_print.h> 9 10 #include "gem/i915_gem_context.h" 11 #include "gem/i915_gem_internal.h" 12 #include "gt/intel_gt_regs.h" 13 14 #include "i915_cmd_parser.h" 15 #include "i915_drv.h" 16 #include "intel_breadcrumbs.h" 17 #include "intel_context.h" 18 #include "intel_engine.h" 19 #include "intel_engine_pm.h" 20 #include "intel_engine_regs.h" 21 #include "intel_engine_user.h" 22 #include "intel_execlists_submission.h" 23 #include "intel_gt.h" 24 #include "intel_gt_mcr.h" 25 #include "intel_gt_pm.h" 26 #include "intel_gt_requests.h" 27 #include "intel_lrc.h" 28 #include "intel_lrc_reg.h" 29 #include "intel_reset.h" 30 #include "intel_ring.h" 31 #include "uc/intel_guc_submission.h" 32 33 /* Haswell does have the CXT_SIZE register however it does not appear to be 34 * valid. Now, docs explain in dwords what is in the context object. The full 35 * size is 70720 bytes, however, the power context and execlist context will 36 * never be saved (power context is stored elsewhere, and execlists don't work 37 * on HSW) - so the final size, including the extra state required for the 38 * Resource Streamer, is 66944 bytes, which rounds to 17 pages. 39 */ 40 #define HSW_CXT_TOTAL_SIZE (17 * PAGE_SIZE) 41 42 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 43 #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 44 #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) 45 #define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE) 46 47 #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE) 48 49 #define MAX_MMIO_BASES 3 50 struct engine_info { 51 u8 class; 52 u8 instance; 53 /* mmio bases table *must* be sorted in reverse graphics_ver order */ 54 struct engine_mmio_base { 55 u32 graphics_ver : 8; 56 u32 base : 24; 57 } mmio_bases[MAX_MMIO_BASES]; 58 }; 59 60 static const struct engine_info intel_engines[] = { 61 [RCS0] = { 62 .class = RENDER_CLASS, 63 .instance = 0, 64 .mmio_bases = { 65 { .graphics_ver = 1, .base = RENDER_RING_BASE } 66 }, 67 }, 68 [BCS0] = { 69 .class = COPY_ENGINE_CLASS, 70 .instance = 0, 71 .mmio_bases = { 72 { .graphics_ver = 6, .base = BLT_RING_BASE } 73 }, 74 }, 75 [BCS1] = { 76 .class = COPY_ENGINE_CLASS, 77 .instance = 1, 78 .mmio_bases = { 79 { .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE } 80 }, 81 }, 82 [BCS2] = { 83 .class = COPY_ENGINE_CLASS, 84 .instance = 2, 85 .mmio_bases = { 86 { .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE } 87 }, 88 }, 89 [BCS3] = { 90 .class = COPY_ENGINE_CLASS, 91 .instance = 3, 92 .mmio_bases = { 93 { .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE } 94 }, 95 }, 96 [BCS4] = { 97 .class = COPY_ENGINE_CLASS, 98 .instance = 4, 99 .mmio_bases = { 100 { .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE } 101 }, 102 }, 103 [BCS5] = { 104 .class = COPY_ENGINE_CLASS, 105 .instance = 5, 106 .mmio_bases = { 107 { .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE } 108 }, 109 }, 110 [BCS6] = { 111 .class = COPY_ENGINE_CLASS, 112 .instance = 6, 113 .mmio_bases = { 114 { .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE } 115 }, 116 }, 117 [BCS7] = { 118 .class = COPY_ENGINE_CLASS, 119 .instance = 7, 120 .mmio_bases = { 121 { .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE } 122 }, 123 }, 124 [BCS8] = { 125 .class = COPY_ENGINE_CLASS, 126 .instance = 8, 127 .mmio_bases = { 128 { .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE } 129 }, 130 }, 131 [VCS0] = { 132 .class = VIDEO_DECODE_CLASS, 133 .instance = 0, 134 .mmio_bases 
= { 135 { .graphics_ver = 11, .base = GEN11_BSD_RING_BASE }, 136 { .graphics_ver = 6, .base = GEN6_BSD_RING_BASE }, 137 { .graphics_ver = 4, .base = BSD_RING_BASE } 138 }, 139 }, 140 [VCS1] = { 141 .class = VIDEO_DECODE_CLASS, 142 .instance = 1, 143 .mmio_bases = { 144 { .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE }, 145 { .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE } 146 }, 147 }, 148 [VCS2] = { 149 .class = VIDEO_DECODE_CLASS, 150 .instance = 2, 151 .mmio_bases = { 152 { .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE } 153 }, 154 }, 155 [VCS3] = { 156 .class = VIDEO_DECODE_CLASS, 157 .instance = 3, 158 .mmio_bases = { 159 { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE } 160 }, 161 }, 162 [VCS4] = { 163 .class = VIDEO_DECODE_CLASS, 164 .instance = 4, 165 .mmio_bases = { 166 { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE } 167 }, 168 }, 169 [VCS5] = { 170 .class = VIDEO_DECODE_CLASS, 171 .instance = 5, 172 .mmio_bases = { 173 { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE } 174 }, 175 }, 176 [VCS6] = { 177 .class = VIDEO_DECODE_CLASS, 178 .instance = 6, 179 .mmio_bases = { 180 { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE } 181 }, 182 }, 183 [VCS7] = { 184 .class = VIDEO_DECODE_CLASS, 185 .instance = 7, 186 .mmio_bases = { 187 { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE } 188 }, 189 }, 190 [VECS0] = { 191 .class = VIDEO_ENHANCEMENT_CLASS, 192 .instance = 0, 193 .mmio_bases = { 194 { .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE }, 195 { .graphics_ver = 7, .base = VEBOX_RING_BASE } 196 }, 197 }, 198 [VECS1] = { 199 .class = VIDEO_ENHANCEMENT_CLASS, 200 .instance = 1, 201 .mmio_bases = { 202 { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE } 203 }, 204 }, 205 [VECS2] = { 206 .class = VIDEO_ENHANCEMENT_CLASS, 207 .instance = 2, 208 .mmio_bases = { 209 { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE } 210 }, 211 }, 212 [VECS3] = { 213 .class = VIDEO_ENHANCEMENT_CLASS, 214 .instance = 3, 215 .mmio_bases = { 216 { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE } 217 }, 218 }, 219 [CCS0] = { 220 .class = COMPUTE_CLASS, 221 .instance = 0, 222 .mmio_bases = { 223 { .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE } 224 } 225 }, 226 [CCS1] = { 227 .class = COMPUTE_CLASS, 228 .instance = 1, 229 .mmio_bases = { 230 { .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE } 231 } 232 }, 233 [CCS2] = { 234 .class = COMPUTE_CLASS, 235 .instance = 2, 236 .mmio_bases = { 237 { .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE } 238 } 239 }, 240 [CCS3] = { 241 .class = COMPUTE_CLASS, 242 .instance = 3, 243 .mmio_bases = { 244 { .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE } 245 } 246 }, 247 [GSC0] = { 248 .class = OTHER_CLASS, 249 .instance = OTHER_GSC_INSTANCE, 250 .mmio_bases = { 251 { .graphics_ver = 12, .base = MTL_GSC_RING_BASE } 252 } 253 }, 254 }; 255 256 /** 257 * intel_engine_context_size() - return the size of the context for an engine 258 * @gt: the gt 259 * @class: engine class 260 * 261 * Each engine class may require a different amount of space for a context 262 * image. 263 * 264 * Return: size (in bytes) of an engine class specific context image 265 * 266 * Note: this size includes the HWSP, which is part of the context image 267 * in LRC mode, but does not include the "shared data page" used with 268 * GuC submission. The caller should account for this if using the GuC. 
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case COMPUTE_CLASS:
		fallthrough;
	case RENDER_CLASS:
		switch (GRAPHICS_VER(gt->i915)) {
		default:
			MISSING_CASE(GRAPHICS_VER(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
				"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
				GRAPHICS_VER(gt->i915), cxt_size * 64,
				cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
	case OTHER_CLASS:
		if (GRAPHICS_VER(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
373 */ 374 if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS) 375 return; 376 377 if (GRAPHICS_VER(engine->i915) >= 3) 378 ENGINE_WRITE(engine, RING_HWSTAM, mask); 379 else 380 ENGINE_WRITE16(engine, RING_HWSTAM, mask); 381 } 382 383 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) 384 { 385 /* Mask off all writes into the unknown HWSP */ 386 intel_engine_set_hwsp_writemask(engine, ~0u); 387 } 388 389 static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir) 390 { 391 GEM_DEBUG_WARN_ON(iir); 392 } 393 394 static u32 get_reset_domain(u8 ver, enum intel_engine_id id) 395 { 396 u32 reset_domain; 397 398 if (ver >= 11) { 399 static const u32 engine_reset_domains[] = { 400 [RCS0] = GEN11_GRDOM_RENDER, 401 [BCS0] = GEN11_GRDOM_BLT, 402 [BCS1] = XEHPC_GRDOM_BLT1, 403 [BCS2] = XEHPC_GRDOM_BLT2, 404 [BCS3] = XEHPC_GRDOM_BLT3, 405 [BCS4] = XEHPC_GRDOM_BLT4, 406 [BCS5] = XEHPC_GRDOM_BLT5, 407 [BCS6] = XEHPC_GRDOM_BLT6, 408 [BCS7] = XEHPC_GRDOM_BLT7, 409 [BCS8] = XEHPC_GRDOM_BLT8, 410 [VCS0] = GEN11_GRDOM_MEDIA, 411 [VCS1] = GEN11_GRDOM_MEDIA2, 412 [VCS2] = GEN11_GRDOM_MEDIA3, 413 [VCS3] = GEN11_GRDOM_MEDIA4, 414 [VCS4] = GEN11_GRDOM_MEDIA5, 415 [VCS5] = GEN11_GRDOM_MEDIA6, 416 [VCS6] = GEN11_GRDOM_MEDIA7, 417 [VCS7] = GEN11_GRDOM_MEDIA8, 418 [VECS0] = GEN11_GRDOM_VECS, 419 [VECS1] = GEN11_GRDOM_VECS2, 420 [VECS2] = GEN11_GRDOM_VECS3, 421 [VECS3] = GEN11_GRDOM_VECS4, 422 [CCS0] = GEN11_GRDOM_RENDER, 423 [CCS1] = GEN11_GRDOM_RENDER, 424 [CCS2] = GEN11_GRDOM_RENDER, 425 [CCS3] = GEN11_GRDOM_RENDER, 426 [GSC0] = GEN12_GRDOM_GSC, 427 }; 428 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || 429 !engine_reset_domains[id]); 430 reset_domain = engine_reset_domains[id]; 431 } else { 432 static const u32 engine_reset_domains[] = { 433 [RCS0] = GEN6_GRDOM_RENDER, 434 [BCS0] = GEN6_GRDOM_BLT, 435 [VCS0] = GEN6_GRDOM_MEDIA, 436 [VCS1] = GEN8_GRDOM_MEDIA2, 437 [VECS0] = GEN6_GRDOM_VECS, 438 }; 439 GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || 440 !engine_reset_domains[id]); 441 reset_domain = engine_reset_domains[id]; 442 } 443 444 return reset_domain; 445 } 446 447 static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, 448 u8 logical_instance) 449 { 450 const struct engine_info *info = &intel_engines[id]; 451 struct drm_i915_private *i915 = gt->i915; 452 struct intel_engine_cs *engine; 453 u8 guc_class; 454 455 BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); 456 BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); 457 BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1)); 458 BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1)); 459 460 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine))) 461 return -EINVAL; 462 463 if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS)) 464 return -EINVAL; 465 466 if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) 467 return -EINVAL; 468 469 if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance])) 470 return -EINVAL; 471 472 engine = kzalloc(sizeof(*engine), GFP_KERNEL); 473 if (!engine) 474 return -ENOMEM; 475 476 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); 477 478 INIT_LIST_HEAD(&engine->pinned_contexts_list); 479 engine->id = id; 480 engine->legacy_idx = INVALID_ENGINE; 481 engine->mask = BIT(id); 482 engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915), 483 id); 484 engine->i915 = i915; 485 engine->gt = gt; 486 engine->uncore = gt->uncore; 487 guc_class = engine_class_to_guc_class(info->class); 488 
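	/*
	 * The GuC firmware addresses engines by its own (class, instance)
	 * numbering, so translate the i915 engine class to the GuC class and
	 * combine it with the hardware instance below to form the guc_id used
	 * when talking to the GuC submission backend.
	 */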
engine->guc_id = MAKE_GUC_ID(guc_class, info->instance); 489 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases); 490 491 engine->irq_handler = nop_irq_handler; 492 493 engine->class = info->class; 494 engine->instance = info->instance; 495 engine->logical_mask = BIT(logical_instance); 496 __sprint_engine_name(engine); 497 498 if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) && 499 __ffs(CCS_MASK(engine->gt)) == engine->instance) || 500 engine->class == RENDER_CLASS) 501 engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE; 502 503 /* features common between engines sharing EUs */ 504 if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) { 505 engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE; 506 engine->flags |= I915_ENGINE_HAS_EU_PRIORITY; 507 } 508 509 engine->props.heartbeat_interval_ms = 510 CONFIG_DRM_I915_HEARTBEAT_INTERVAL; 511 engine->props.max_busywait_duration_ns = 512 CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT; 513 engine->props.preempt_timeout_ms = 514 CONFIG_DRM_I915_PREEMPT_TIMEOUT; 515 engine->props.stop_timeout_ms = 516 CONFIG_DRM_I915_STOP_TIMEOUT; 517 engine->props.timeslice_duration_ms = 518 CONFIG_DRM_I915_TIMESLICE_DURATION; 519 520 /* 521 * Mid-thread pre-emption is not available in Gen12. Unfortunately, 522 * some compute workloads run quite long threads. That means they get 523 * reset due to not pre-empting in a timely manner. So, bump the 524 * pre-emption timeout value to be much higher for compute engines. 525 */ 526 if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)) 527 engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE; 528 529 /* Cap properties according to any system limits */ 530 #define CLAMP_PROP(field) \ 531 do { \ 532 u64 clamp = intel_clamp_##field(engine, engine->props.field); \ 533 if (clamp != engine->props.field) { \ 534 drm_notice(&engine->i915->drm, \ 535 "Warning, clamping %s to %lld to prevent overflow\n", \ 536 #field, clamp); \ 537 engine->props.field = clamp; \ 538 } \ 539 } while (0) 540 541 CLAMP_PROP(heartbeat_interval_ms); 542 CLAMP_PROP(max_busywait_duration_ns); 543 CLAMP_PROP(preempt_timeout_ms); 544 CLAMP_PROP(stop_timeout_ms); 545 CLAMP_PROP(timeslice_duration_ms); 546 547 #undef CLAMP_PROP 548 549 engine->defaults = engine->props; /* never to change again */ 550 551 engine->context_size = intel_engine_context_size(gt, engine->class); 552 if (WARN_ON(engine->context_size > BIT(20))) 553 engine->context_size = 0; 554 if (engine->context_size) 555 DRIVER_CAPS(i915)->has_logical_contexts = true; 556 557 ewma__engine_latency_init(&engine->latency); 558 seqcount_init(&engine->stats.execlists.lock); 559 560 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); 561 562 /* Scrub mmio state on takeover */ 563 intel_engine_sanitize_mmio(engine); 564 565 gt->engine_class[info->class][info->instance] = engine; 566 gt->engine[id] = engine; 567 568 return 0; 569 } 570 571 u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value) 572 { 573 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 574 575 return value; 576 } 577 578 u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value) 579 { 580 value = min(value, jiffies_to_nsecs(2)); 581 582 return value; 583 } 584 585 u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value) 586 { 587 /* 588 * NB: The GuC API only supports 32bit values. 
However, the limit is further 589 * reduced due to internal calculations which would otherwise overflow. 590 */ 591 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) 592 value = min_t(u64, value, guc_policy_max_preempt_timeout_ms()); 593 594 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 595 596 return value; 597 } 598 599 u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value) 600 { 601 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 602 603 return value; 604 } 605 606 u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value) 607 { 608 /* 609 * NB: The GuC API only supports 32bit values. However, the limit is further 610 * reduced due to internal calculations which would otherwise overflow. 611 */ 612 if (intel_guc_submission_is_wanted(&engine->gt->uc.guc)) 613 value = min_t(u64, value, guc_policy_max_exec_quantum_ms()); 614 615 value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT)); 616 617 return value; 618 } 619 620 static void __setup_engine_capabilities(struct intel_engine_cs *engine) 621 { 622 struct drm_i915_private *i915 = engine->i915; 623 624 if (engine->class == VIDEO_DECODE_CLASS) { 625 /* 626 * HEVC support is present on first engine instance 627 * before Gen11 and on all instances afterwards. 628 */ 629 if (GRAPHICS_VER(i915) >= 11 || 630 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0)) 631 engine->uabi_capabilities |= 632 I915_VIDEO_CLASS_CAPABILITY_HEVC; 633 634 /* 635 * SFC block is present only on even logical engine 636 * instances. 637 */ 638 if ((GRAPHICS_VER(i915) >= 11 && 639 (engine->gt->info.vdbox_sfc_access & 640 BIT(engine->instance))) || 641 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0)) 642 engine->uabi_capabilities |= 643 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; 644 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) { 645 if (GRAPHICS_VER(i915) >= 9 && 646 engine->gt->info.sfc_mask & BIT(engine->instance)) 647 engine->uabi_capabilities |= 648 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; 649 } 650 } 651 652 static void intel_setup_engine_capabilities(struct intel_gt *gt) 653 { 654 struct intel_engine_cs *engine; 655 enum intel_engine_id id; 656 657 for_each_engine(engine, gt, id) 658 __setup_engine_capabilities(engine); 659 } 660 661 /** 662 * intel_engines_release() - free the resources allocated for Command Streamers 663 * @gt: pointer to struct intel_gt 664 */ 665 void intel_engines_release(struct intel_gt *gt) 666 { 667 struct intel_engine_cs *engine; 668 enum intel_engine_id id; 669 670 /* 671 * Before we release the resources held by engine, we must be certain 672 * that the HW is no longer accessing them -- having the GPU scribble 673 * to or read from a page being used for something else causes no end 674 * of fun. 675 * 676 * The GPU should be reset by this point, but assume the worst just 677 * in case we aborted before completely initialising the engines. 
	 */
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		if (!engine->release)
			continue;

		intel_wakeref_wait_for_idle(&engine->wakeref);
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));
	}
}

void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{
	if (!engine->request_pool)
		return;

	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Free the requests! dma-resv keeps fences around for an eternity */
	rcu_barrier();

	for_each_engine(engine, gt, id) {
		intel_engine_free_request_pool(engine);
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
			 unsigned int physical_vdbox,
			 unsigned int logical_vdbox, u16 vdbox_mask)
{
	struct drm_i915_private *i915 = gt->i915;

	/*
	 * In Gen11, only even numbered logical VDBOXes are hooked
	 * up to an SFC (Scaler & Format Converter) unit.
	 * In Gen12, even numbered physical instances are always connected
	 * to an SFC. Odd numbered physical instances have an SFC only if
	 * the previous even instance is fused off.
	 *
	 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
	 * in the fuse register that tells us whether a specific SFC is present.
	 */
	if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
		return false;
	else if (MEDIA_VER(i915) >= 12)
		return (physical_vdbox % 2 == 0) ||
			!(BIT(physical_vdbox - 1) & vdbox_mask);
	else if (MEDIA_VER(i915) == 11)
		return logical_vdbox % 2 == 0;

	return false;
}

static void engine_mask_apply_media_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse, fuse1;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (MEDIA_VER(gt->i915) < 11)
		return;

	/*
	 * On newer platforms the fusing register is called 'enable' and has
	 * enable semantics, while on older platforms it is called 'disable'
	 * and bits have disable semantics.
	 */
	media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
	if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
		media_fuse = ~media_fuse;

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		     GEN11_GT_VEBOX_DISABLE_SHIFT;

	if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
		fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
		gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
	} else {
		gt->info.sfc_mask = ~0;
	}

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(gt, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
			continue;
		}

		if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
			gt->info.vdbox_sfc_access |= BIT(i);
		logical_vdbox++;
	}
	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(gt));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(gt, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(gt));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
}

static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
	unsigned long ccs_mask;
	unsigned int i;

	if (GRAPHICS_VER(i915) < 11)
		return;

	if (hweight32(CCS_MASK(gt)) <= 1)
		return;

	ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
						     ss_per_ccs);
	/*
	 * If all DSS in a quadrant are fused off, the corresponding CCS
	 * engine is not available for use.
	 */
	for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
		info->engine_mask &= ~BIT(_CCS(i));
		drm_dbg(&i915->drm, "ccs%u fused off\n", i);
	}
}

static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	unsigned long meml3_mask;
	unsigned long quad;

	if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
	      GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
		return;

	meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
	meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);

	/*
	 * Link Copy engines may be fused off according to meml3_mask. Each
	 * bit is a quad that houses 2 Link Copy and two Sub Copy engines.
	 */
	for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
		unsigned int instance = quad * 2 + 1;
		intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
						   _BCS(instance));

		if (mask & info->engine_mask) {
			drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
			drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);

			info->engine_mask &= ~mask;
		}
	}
}

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
	struct intel_gt_info *info = &gt->info;

	GEM_BUG_ON(!info->engine_mask);

	engine_mask_apply_media_fuses(gt);
	engine_mask_apply_compute_fuses(gt);
	engine_mask_apply_copy_fuses(gt);

	return info->engine_mask;
}

static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
				 u8 class, const u8 *map, u8 num_instances)
{
	int i, j;
	u8 current_logical_id = 0;

	for (j = 0; j < num_instances; ++j) {
		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			if (!HAS_ENGINE(gt, i) ||
			    intel_engines[i].class != class)
				continue;

			if (intel_engines[i].instance == map[j]) {
				logical_ids[intel_engines[i].instance] =
					current_logical_id++;
				break;
			}
		}
	}
}

static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
	/*
	 * Logical to physical mapping is needed for proper support
	 * of the split-frame feature.
	 */
	if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
		const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };

		populate_logical_ids(gt, logical_ids, class,
				     map, ARRAY_SIZE(map));
	} else {
		int i;
		u8 map[MAX_ENGINE_INSTANCE + 1];

		for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
			map[i] = i;
		populate_logical_ids(gt, logical_ids, class,
				     map, ARRAY_SIZE(map));
	}
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int engine_mask = init_engine_mask(gt);
	unsigned int mask = 0;
	unsigned int i, class;
	u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
	int err;

	drm_WARN_ON(&i915->drm, engine_mask == 0);
	drm_WARN_ON(&i915->drm, engine_mask &
		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
		setup_logical_ids(gt, logical_ids, class);

		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			u8 instance = intel_engines[i].instance;

			if (intel_engines[i].class != class ||
			    !HAS_ENGINE(gt, i))
				continue;

			err = intel_engine_setup(gt, i,
						 logical_ids[instance]);
			if (err)
				goto cleanup;

			mask |= BIT(i);
		}
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
986 */ 987 if (drm_WARN_ON(&i915->drm, mask != engine_mask)) 988 gt->info.engine_mask = mask; 989 990 gt->info.num_engines = hweight32(mask); 991 992 intel_gt_check_and_clear_faults(gt); 993 994 intel_setup_engine_capabilities(gt); 995 996 intel_uncore_prune_engine_fw_domains(gt->uncore, gt); 997 998 return 0; 999 1000 cleanup: 1001 intel_engines_free(gt); 1002 return err; 1003 } 1004 1005 void intel_engine_init_execlists(struct intel_engine_cs *engine) 1006 { 1007 struct intel_engine_execlists * const execlists = &engine->execlists; 1008 1009 execlists->port_mask = 1; 1010 GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); 1011 GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); 1012 1013 memset(execlists->pending, 0, sizeof(execlists->pending)); 1014 execlists->active = 1015 memset(execlists->inflight, 0, sizeof(execlists->inflight)); 1016 } 1017 1018 static void cleanup_status_page(struct intel_engine_cs *engine) 1019 { 1020 struct i915_vma *vma; 1021 1022 /* Prevent writes into HWSP after returning the page to the system */ 1023 intel_engine_set_hwsp_writemask(engine, ~0u); 1024 1025 vma = fetch_and_zero(&engine->status_page.vma); 1026 if (!vma) 1027 return; 1028 1029 if (!HWS_NEEDS_PHYSICAL(engine->i915)) 1030 i915_vma_unpin(vma); 1031 1032 i915_gem_object_unpin_map(vma->obj); 1033 i915_gem_object_put(vma->obj); 1034 } 1035 1036 static int pin_ggtt_status_page(struct intel_engine_cs *engine, 1037 struct i915_gem_ww_ctx *ww, 1038 struct i915_vma *vma) 1039 { 1040 unsigned int flags; 1041 1042 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) 1043 /* 1044 * On g33, we cannot place HWS above 256MiB, so 1045 * restrict its pinning to the low mappable arena. 1046 * Though this restriction is not documented for 1047 * gen4, gen5, or byt, they also behave similarly 1048 * and hang if the HWS is placed at the top of the 1049 * GTT. To generalise, it appears that all !llc 1050 * platforms have issues with us placing the HWS 1051 * above the mappable region (even though we never 1052 * actually map it). 1053 */ 1054 flags = PIN_MAPPABLE; 1055 else 1056 flags = PIN_HIGH; 1057 1058 return i915_ggtt_pin(vma, ww, 0, flags); 1059 } 1060 1061 static int init_status_page(struct intel_engine_cs *engine) 1062 { 1063 struct drm_i915_gem_object *obj; 1064 struct i915_gem_ww_ctx ww; 1065 struct i915_vma *vma; 1066 void *vaddr; 1067 int ret; 1068 1069 INIT_LIST_HEAD(&engine->status_page.timelines); 1070 1071 /* 1072 * Though the HWS register does support 36bit addresses, historically 1073 * we have had hangs and corruption reported due to wild writes if 1074 * the HWS is placed above 4G. We only allow objects to be allocated 1075 * in GFP_DMA32 for i965, and no earlier physical address users had 1076 * access to more than 4G. 
1077 */ 1078 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); 1079 if (IS_ERR(obj)) { 1080 drm_err(&engine->i915->drm, 1081 "Failed to allocate status page\n"); 1082 return PTR_ERR(obj); 1083 } 1084 1085 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); 1086 1087 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); 1088 if (IS_ERR(vma)) { 1089 ret = PTR_ERR(vma); 1090 goto err_put; 1091 } 1092 1093 i915_gem_ww_ctx_init(&ww, true); 1094 retry: 1095 ret = i915_gem_object_lock(obj, &ww); 1096 if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915)) 1097 ret = pin_ggtt_status_page(engine, &ww, vma); 1098 if (ret) 1099 goto err; 1100 1101 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 1102 if (IS_ERR(vaddr)) { 1103 ret = PTR_ERR(vaddr); 1104 goto err_unpin; 1105 } 1106 1107 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE); 1108 engine->status_page.vma = vma; 1109 1110 err_unpin: 1111 if (ret) 1112 i915_vma_unpin(vma); 1113 err: 1114 if (ret == -EDEADLK) { 1115 ret = i915_gem_ww_ctx_backoff(&ww); 1116 if (!ret) 1117 goto retry; 1118 } 1119 i915_gem_ww_ctx_fini(&ww); 1120 err_put: 1121 if (ret) 1122 i915_gem_object_put(obj); 1123 return ret; 1124 } 1125 1126 static int engine_setup_common(struct intel_engine_cs *engine) 1127 { 1128 int err; 1129 1130 init_llist_head(&engine->barrier_tasks); 1131 1132 err = init_status_page(engine); 1133 if (err) 1134 return err; 1135 1136 engine->breadcrumbs = intel_breadcrumbs_create(engine); 1137 if (!engine->breadcrumbs) { 1138 err = -ENOMEM; 1139 goto err_status; 1140 } 1141 1142 engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL); 1143 if (!engine->sched_engine) { 1144 err = -ENOMEM; 1145 goto err_sched_engine; 1146 } 1147 engine->sched_engine->private_data = engine; 1148 1149 err = intel_engine_init_cmd_parser(engine); 1150 if (err) 1151 goto err_cmd_parser; 1152 1153 intel_engine_init_execlists(engine); 1154 intel_engine_init__pm(engine); 1155 intel_engine_init_retire(engine); 1156 1157 /* Use the whole device by default */ 1158 engine->sseu = 1159 intel_sseu_from_device_info(&engine->gt->info.sseu); 1160 1161 intel_engine_init_workarounds(engine); 1162 intel_engine_init_whitelist(engine); 1163 intel_engine_init_ctx_wa(engine); 1164 1165 if (GRAPHICS_VER(engine->i915) >= 12) 1166 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; 1167 1168 return 0; 1169 1170 err_cmd_parser: 1171 i915_sched_engine_put(engine->sched_engine); 1172 err_sched_engine: 1173 intel_breadcrumbs_put(engine->breadcrumbs); 1174 err_status: 1175 cleanup_status_page(engine); 1176 return err; 1177 } 1178 1179 struct measure_breadcrumb { 1180 struct i915_request rq; 1181 struct intel_ring ring; 1182 u32 cs[2048]; 1183 }; 1184 1185 static int measure_breadcrumb_dw(struct intel_context *ce) 1186 { 1187 struct intel_engine_cs *engine = ce->engine; 1188 struct measure_breadcrumb *frame; 1189 int dw; 1190 1191 GEM_BUG_ON(!engine->gt->scratch); 1192 1193 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 1194 if (!frame) 1195 return -ENOMEM; 1196 1197 frame->rq.engine = engine; 1198 frame->rq.context = ce; 1199 rcu_assign_pointer(frame->rq.timeline, ce->timeline); 1200 frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno; 1201 1202 frame->ring.vaddr = frame->cs; 1203 frame->ring.size = sizeof(frame->cs); 1204 frame->ring.wrap = 1205 BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size); 1206 frame->ring.effective_size = frame->ring.size; 1207 intel_ring_update_space(&frame->ring); 1208 frame->rq.ring = &frame->ring; 1209 1210 mutex_lock(&ce->timeline->mutex); 
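	/*
	 * With the timeline mutex held, also take the scheduler lock while
	 * emit_fini_breadcrumb() writes into the dummy frame: the emitter
	 * normally runs in submission context and may assert that these
	 * locks are held, so holding them here mimics that environment while
	 * we measure how many dwords the breadcrumb needs.
	 */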
	spin_lock_irq(&engine->sched_engine->lock);

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	spin_unlock_irq(&engine->sched_engine->lock);
	mutex_unlock(&ce->timeline->mutex);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	kfree(frame);
	return dw;
}

struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
	ce->timeline = page_pack_bits(NULL, hwsp);
	ce->ring = NULL;
	ce->ring_size = ring_size;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);

	return ce;
}

void intel_engine_destroy_pinned_context(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *hwsp = engine->status_page.vma;

	GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);

	mutex_lock(&hwsp->vm->mutex);
	list_del(&ce->timeline->engine_link);
	mutex_unlock(&hwsp->vm->mutex);

	list_del(&ce->pinned_contexts_link);
	intel_context_unpin(ce);
	intel_context_put(ce);
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;

	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						  I915_GEM_HWS_SEQNO_ADDR,
						  &kernel, "kernel_context");
}

/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;

	return 0;

err_context:
	intel_engine_destroy_pinned_context(ce);
	return ret;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (intel_uc_uses_guc_submission(&gt->uc)) {
		gt->submission_method = INTEL_SUBMISSION_GUC;
		setup = intel_guc_submission_setup;
	} else if (HAS_EXECLISTS(gt->i915)) {
		gt->submission_method = INTEL_SUBMISSION_ELSP;
		setup = intel_execlists_submission_setup;
	} else {
		gt->submission_method = INTEL_SUBMISSION_RING;
		setup = intel_ring_submission_setup;
	}

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err) {
			intel_engine_cleanup_common(engine);
			return err;
		}

		/* The backend should now be responsible for cleanup */
		GEM_BUG_ON(engine->release == NULL);

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_engine_fini_retire(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		fput(engine->default_state);

	if (engine->kernel_context)
		intel_engine_destroy_pinned_context(engine->kernel_context);

	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
	cleanup_status_page(engine);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
1414 */ 1415 int intel_engine_resume(struct intel_engine_cs *engine) 1416 { 1417 intel_engine_apply_workarounds(engine); 1418 intel_engine_apply_whitelist(engine); 1419 1420 return engine->resume(engine); 1421 } 1422 1423 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) 1424 { 1425 struct drm_i915_private *i915 = engine->i915; 1426 1427 u64 acthd; 1428 1429 if (GRAPHICS_VER(i915) >= 8) 1430 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW); 1431 else if (GRAPHICS_VER(i915) >= 4) 1432 acthd = ENGINE_READ(engine, RING_ACTHD); 1433 else 1434 acthd = ENGINE_READ(engine, ACTHD); 1435 1436 return acthd; 1437 } 1438 1439 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) 1440 { 1441 u64 bbaddr; 1442 1443 if (GRAPHICS_VER(engine->i915) >= 8) 1444 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW); 1445 else 1446 bbaddr = ENGINE_READ(engine, RING_BBADDR); 1447 1448 return bbaddr; 1449 } 1450 1451 static unsigned long stop_timeout(const struct intel_engine_cs *engine) 1452 { 1453 if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */ 1454 return 0; 1455 1456 /* 1457 * If we are doing a normal GPU reset, we can take our time and allow 1458 * the engine to quiesce. We've stopped submission to the engine, and 1459 * if we wait long enough an innocent context should complete and 1460 * leave the engine idle. So they should not be caught unaware by 1461 * the forthcoming GPU reset (which usually follows the stop_cs)! 1462 */ 1463 return READ_ONCE(engine->props.stop_timeout_ms); 1464 } 1465 1466 static int __intel_engine_stop_cs(struct intel_engine_cs *engine, 1467 int fast_timeout_us, 1468 int slow_timeout_ms) 1469 { 1470 struct intel_uncore *uncore = engine->uncore; 1471 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base); 1472 int err; 1473 1474 intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); 1475 1476 /* 1477 * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is 1478 * stopped, set ring stop bit and prefetch disable bit to halt CS 1479 */ 1480 if (IS_GRAPHICS_VER(engine->i915, 11, 12)) 1481 intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base), 1482 _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE)); 1483 1484 err = __intel_wait_for_register_fw(engine->uncore, mode, 1485 MODE_IDLE, MODE_IDLE, 1486 fast_timeout_us, 1487 slow_timeout_ms, 1488 NULL); 1489 1490 /* A final mmio read to let GPU writes be hopefully flushed to memory */ 1491 intel_uncore_posting_read_fw(uncore, mode); 1492 return err; 1493 } 1494 1495 int intel_engine_stop_cs(struct intel_engine_cs *engine) 1496 { 1497 int err = 0; 1498 1499 if (GRAPHICS_VER(engine->i915) < 3) 1500 return -ENODEV; 1501 1502 ENGINE_TRACE(engine, "\n"); 1503 /* 1504 * TODO: Find out why occasionally stopping the CS times out. Seen 1505 * especially with gem_eio tests. 1506 * 1507 * Occasionally trying to stop the cs times out, but does not adversely 1508 * affect functionality. The timeout is set as a config parameter that 1509 * defaults to 100ms. In most cases the follow up operation is to wait 1510 * for pending MI_FORCE_WAKES. The assumption is that this timeout is 1511 * sufficient for any pending MI_FORCEWAKEs to complete. Once root 1512 * caused, the caller must check and handle the return from this 1513 * function. 
1514 */ 1515 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) { 1516 ENGINE_TRACE(engine, 1517 "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n", 1518 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR, 1519 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR); 1520 1521 /* 1522 * Sometimes we observe that the idle flag is not 1523 * set even though the ring is empty. So double 1524 * check before giving up. 1525 */ 1526 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) != 1527 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR)) 1528 err = -ETIMEDOUT; 1529 } 1530 1531 return err; 1532 } 1533 1534 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) 1535 { 1536 ENGINE_TRACE(engine, "\n"); 1537 1538 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 1539 } 1540 1541 static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine) 1542 { 1543 static const i915_reg_t _reg[I915_NUM_ENGINES] = { 1544 [RCS0] = MSG_IDLE_CS, 1545 [BCS0] = MSG_IDLE_BCS, 1546 [VCS0] = MSG_IDLE_VCS0, 1547 [VCS1] = MSG_IDLE_VCS1, 1548 [VCS2] = MSG_IDLE_VCS2, 1549 [VCS3] = MSG_IDLE_VCS3, 1550 [VCS4] = MSG_IDLE_VCS4, 1551 [VCS5] = MSG_IDLE_VCS5, 1552 [VCS6] = MSG_IDLE_VCS6, 1553 [VCS7] = MSG_IDLE_VCS7, 1554 [VECS0] = MSG_IDLE_VECS0, 1555 [VECS1] = MSG_IDLE_VECS1, 1556 [VECS2] = MSG_IDLE_VECS2, 1557 [VECS3] = MSG_IDLE_VECS3, 1558 [CCS0] = MSG_IDLE_CS, 1559 [CCS1] = MSG_IDLE_CS, 1560 [CCS2] = MSG_IDLE_CS, 1561 [CCS3] = MSG_IDLE_CS, 1562 }; 1563 u32 val; 1564 1565 if (!_reg[engine->id].reg) { 1566 drm_err(&engine->i915->drm, 1567 "MSG IDLE undefined for engine id %u\n", engine->id); 1568 return 0; 1569 } 1570 1571 val = intel_uncore_read(engine->uncore, _reg[engine->id]); 1572 1573 /* bits[29:25] & bits[13:9] >> shift */ 1574 return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT; 1575 } 1576 1577 static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask) 1578 { 1579 int ret; 1580 1581 /* Ensure GPM receives fw up/down after CS is stopped */ 1582 udelay(1); 1583 1584 /* Wait for forcewake request to complete in GPM */ 1585 ret = __intel_wait_for_register_fw(gt->uncore, 1586 GEN9_PWRGT_DOMAIN_STATUS, 1587 fw_mask, fw_mask, 5000, 0, NULL); 1588 1589 /* Ensure CS receives fw ack from GPM */ 1590 udelay(1); 1591 1592 if (ret) 1593 GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret); 1594 } 1595 1596 /* 1597 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any 1598 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The 1599 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the 1600 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we 1601 * are concerned only with the gt reset here, we use a logical OR of pending 1602 * forcewakeups from all reset domains and then wait for them to complete by 1603 * querying PWRGT_DOMAIN_STATUS. 
1604 */ 1605 void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine) 1606 { 1607 u32 fw_pending = __cs_pending_mi_force_wakes(engine); 1608 1609 if (fw_pending) 1610 __gpm_wait_for_fw_complete(engine->gt, fw_pending); 1611 } 1612 1613 /* NB: please notice the memset */ 1614 void intel_engine_get_instdone(const struct intel_engine_cs *engine, 1615 struct intel_instdone *instdone) 1616 { 1617 struct drm_i915_private *i915 = engine->i915; 1618 struct intel_uncore *uncore = engine->uncore; 1619 u32 mmio_base = engine->mmio_base; 1620 int slice; 1621 int subslice; 1622 int iter; 1623 1624 memset(instdone, 0, sizeof(*instdone)); 1625 1626 if (GRAPHICS_VER(i915) >= 8) { 1627 instdone->instdone = 1628 intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); 1629 1630 if (engine->id != RCS0) 1631 return; 1632 1633 instdone->slice_common = 1634 intel_uncore_read(uncore, GEN7_SC_INSTDONE); 1635 if (GRAPHICS_VER(i915) >= 12) { 1636 instdone->slice_common_extra[0] = 1637 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA); 1638 instdone->slice_common_extra[1] = 1639 intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2); 1640 } 1641 1642 for_each_ss_steering(iter, engine->gt, slice, subslice) { 1643 instdone->sampler[slice][subslice] = 1644 intel_gt_mcr_read(engine->gt, 1645 GEN8_SAMPLER_INSTDONE, 1646 slice, subslice); 1647 instdone->row[slice][subslice] = 1648 intel_gt_mcr_read(engine->gt, 1649 GEN8_ROW_INSTDONE, 1650 slice, subslice); 1651 } 1652 1653 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) { 1654 for_each_ss_steering(iter, engine->gt, slice, subslice) 1655 instdone->geom_svg[slice][subslice] = 1656 intel_gt_mcr_read(engine->gt, 1657 XEHPG_INSTDONE_GEOM_SVG, 1658 slice, subslice); 1659 } 1660 } else if (GRAPHICS_VER(i915) >= 7) { 1661 instdone->instdone = 1662 intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); 1663 1664 if (engine->id != RCS0) 1665 return; 1666 1667 instdone->slice_common = 1668 intel_uncore_read(uncore, GEN7_SC_INSTDONE); 1669 instdone->sampler[0][0] = 1670 intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE); 1671 instdone->row[0][0] = 1672 intel_uncore_read(uncore, GEN7_ROW_INSTDONE); 1673 } else if (GRAPHICS_VER(i915) >= 4) { 1674 instdone->instdone = 1675 intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); 1676 if (engine->id == RCS0) 1677 /* HACK: Using the wrong struct member */ 1678 instdone->slice_common = 1679 intel_uncore_read(uncore, GEN4_INSTDONE1); 1680 } else { 1681 instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE); 1682 } 1683 } 1684 1685 static bool ring_is_idle(struct intel_engine_cs *engine) 1686 { 1687 bool idle = true; 1688 1689 if (I915_SELFTEST_ONLY(!engine->mmio_base)) 1690 return true; 1691 1692 if (!intel_engine_pm_get_if_awake(engine)) 1693 return true; 1694 1695 /* First check that no commands are left in the ring */ 1696 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) != 1697 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR)) 1698 idle = false; 1699 1700 /* No bit for gen2, so assume the CS parser is idle */ 1701 if (GRAPHICS_VER(engine->i915) > 2 && 1702 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) 1703 idle = false; 1704 1705 intel_engine_pm_put(engine); 1706 1707 return idle; 1708 } 1709 1710 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync) 1711 { 1712 struct tasklet_struct *t = &engine->sched_engine->tasklet; 1713 1714 if (!t->callback) 1715 return; 1716 1717 local_bh_disable(); 1718 if (tasklet_trylock(t)) { 1719 /* Must wait for any GPU reset in progress. 
*/ 1720 if (__tasklet_is_enabled(t)) 1721 t->callback(t); 1722 tasklet_unlock(t); 1723 } 1724 local_bh_enable(); 1725 1726 /* Synchronise and wait for the tasklet on another CPU */ 1727 if (sync) 1728 tasklet_unlock_wait(t); 1729 } 1730 1731 /** 1732 * intel_engine_is_idle() - Report if the engine has finished process all work 1733 * @engine: the intel_engine_cs 1734 * 1735 * Return true if there are no requests pending, nothing left to be submitted 1736 * to hardware, and that the engine is idle. 1737 */ 1738 bool intel_engine_is_idle(struct intel_engine_cs *engine) 1739 { 1740 /* More white lies, if wedged, hw state is inconsistent */ 1741 if (intel_gt_is_wedged(engine->gt)) 1742 return true; 1743 1744 if (!intel_engine_pm_is_awake(engine)) 1745 return true; 1746 1747 /* Waiting to drain ELSP? */ 1748 intel_synchronize_hardirq(engine->i915); 1749 intel_engine_flush_submission(engine); 1750 1751 /* ELSP is empty, but there are ready requests? E.g. after reset */ 1752 if (!i915_sched_engine_is_empty(engine->sched_engine)) 1753 return false; 1754 1755 /* Ring stopped? */ 1756 return ring_is_idle(engine); 1757 } 1758 1759 bool intel_engines_are_idle(struct intel_gt *gt) 1760 { 1761 struct intel_engine_cs *engine; 1762 enum intel_engine_id id; 1763 1764 /* 1765 * If the driver is wedged, HW state may be very inconsistent and 1766 * report that it is still busy, even though we have stopped using it. 1767 */ 1768 if (intel_gt_is_wedged(gt)) 1769 return true; 1770 1771 /* Already parked (and passed an idleness test); must still be idle */ 1772 if (!READ_ONCE(gt->awake)) 1773 return true; 1774 1775 for_each_engine(engine, gt, id) { 1776 if (!intel_engine_is_idle(engine)) 1777 return false; 1778 } 1779 1780 return true; 1781 } 1782 1783 bool intel_engine_irq_enable(struct intel_engine_cs *engine) 1784 { 1785 if (!engine->irq_enable) 1786 return false; 1787 1788 /* Caller disables interrupts */ 1789 spin_lock(engine->gt->irq_lock); 1790 engine->irq_enable(engine); 1791 spin_unlock(engine->gt->irq_lock); 1792 1793 return true; 1794 } 1795 1796 void intel_engine_irq_disable(struct intel_engine_cs *engine) 1797 { 1798 if (!engine->irq_disable) 1799 return; 1800 1801 /* Caller disables interrupts */ 1802 spin_lock(engine->gt->irq_lock); 1803 engine->irq_disable(engine); 1804 spin_unlock(engine->gt->irq_lock); 1805 } 1806 1807 void intel_engines_reset_default_submission(struct intel_gt *gt) 1808 { 1809 struct intel_engine_cs *engine; 1810 enum intel_engine_id id; 1811 1812 for_each_engine(engine, gt, id) { 1813 if (engine->sanitize) 1814 engine->sanitize(engine); 1815 1816 engine->set_default_submission(engine); 1817 } 1818 } 1819 1820 bool intel_engine_can_store_dword(struct intel_engine_cs *engine) 1821 { 1822 switch (GRAPHICS_VER(engine->i915)) { 1823 case 2: 1824 return false; /* uses physical not virtual addresses */ 1825 case 3: 1826 /* maybe only uses physical not virtual addresses */ 1827 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); 1828 case 4: 1829 return !IS_I965G(engine->i915); /* who knows! */ 1830 case 6: 1831 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ 1832 default: 1833 return true; 1834 } 1835 } 1836 1837 static struct intel_timeline *get_timeline(struct i915_request *rq) 1838 { 1839 struct intel_timeline *tl; 1840 1841 /* 1842 * Even though we are holding the engine->sched_engine->lock here, there 1843 * is no control over the submission queue per-se and we are 1844 * inspecting the active state at a random point in time, with an 1845 * unknown queue. 
Play safe and make sure the timeline remains valid. 1846 * (Only being used for pretty printing, one extra kref shouldn't 1847 * cause a camel stampede!) 1848 */ 1849 rcu_read_lock(); 1850 tl = rcu_dereference(rq->timeline); 1851 if (!kref_get_unless_zero(&tl->kref)) 1852 tl = NULL; 1853 rcu_read_unlock(); 1854 1855 return tl; 1856 } 1857 1858 static int print_ring(char *buf, int sz, struct i915_request *rq) 1859 { 1860 int len = 0; 1861 1862 if (!i915_request_signaled(rq)) { 1863 struct intel_timeline *tl = get_timeline(rq); 1864 1865 len = scnprintf(buf, sz, 1866 "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ", 1867 i915_ggtt_offset(rq->ring->vma), 1868 tl ? tl->hwsp_offset : 0, 1869 hwsp_seqno(rq), 1870 DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context), 1871 1000 * 1000)); 1872 1873 if (tl) 1874 intel_timeline_put(tl); 1875 } 1876 1877 return len; 1878 } 1879 1880 static void hexdump(struct drm_printer *m, const void *buf, size_t len) 1881 { 1882 const size_t rowsize = 8 * sizeof(u32); 1883 const void *prev = NULL; 1884 bool skip = false; 1885 size_t pos; 1886 1887 for (pos = 0; pos < len; pos += rowsize) { 1888 char line[128]; 1889 1890 if (prev && !memcmp(prev, buf + pos, rowsize)) { 1891 if (!skip) { 1892 drm_printf(m, "*\n"); 1893 skip = true; 1894 } 1895 continue; 1896 } 1897 1898 WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, 1899 rowsize, sizeof(u32), 1900 line, sizeof(line), 1901 false) >= sizeof(line)); 1902 drm_printf(m, "[%04zx] %s\n", pos, line); 1903 1904 prev = buf + pos; 1905 skip = false; 1906 } 1907 } 1908 1909 static const char *repr_timer(const struct timer_list *t) 1910 { 1911 if (!READ_ONCE(t->expires)) 1912 return "inactive"; 1913 1914 if (timer_pending(t)) 1915 return "active"; 1916 1917 return "expired"; 1918 } 1919 1920 static void intel_engine_print_registers(struct intel_engine_cs *engine, 1921 struct drm_printer *m) 1922 { 1923 struct drm_i915_private *dev_priv = engine->i915; 1924 struct intel_engine_execlists * const execlists = &engine->execlists; 1925 u64 addr; 1926 1927 if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7)) 1928 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); 1929 if (HAS_EXECLISTS(dev_priv)) { 1930 drm_printf(m, "\tEL_STAT_HI: 0x%08x\n", 1931 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI)); 1932 drm_printf(m, "\tEL_STAT_LO: 0x%08x\n", 1933 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO)); 1934 } 1935 drm_printf(m, "\tRING_START: 0x%08x\n", 1936 ENGINE_READ(engine, RING_START)); 1937 drm_printf(m, "\tRING_HEAD: 0x%08x\n", 1938 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR); 1939 drm_printf(m, "\tRING_TAIL: 0x%08x\n", 1940 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR); 1941 drm_printf(m, "\tRING_CTL: 0x%08x%s\n", 1942 ENGINE_READ(engine, RING_CTL), 1943 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); 1944 if (GRAPHICS_VER(engine->i915) > 2) { 1945 drm_printf(m, "\tRING_MODE: 0x%08x%s\n", 1946 ENGINE_READ(engine, RING_MI_MODE), 1947 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? 
" [idle]" : ""); 1948 } 1949 1950 if (GRAPHICS_VER(dev_priv) >= 6) { 1951 drm_printf(m, "\tRING_IMR: 0x%08x\n", 1952 ENGINE_READ(engine, RING_IMR)); 1953 drm_printf(m, "\tRING_ESR: 0x%08x\n", 1954 ENGINE_READ(engine, RING_ESR)); 1955 drm_printf(m, "\tRING_EMR: 0x%08x\n", 1956 ENGINE_READ(engine, RING_EMR)); 1957 drm_printf(m, "\tRING_EIR: 0x%08x\n", 1958 ENGINE_READ(engine, RING_EIR)); 1959 } 1960 1961 addr = intel_engine_get_active_head(engine); 1962 drm_printf(m, "\tACTHD: 0x%08x_%08x\n", 1963 upper_32_bits(addr), lower_32_bits(addr)); 1964 addr = intel_engine_get_last_batch_head(engine); 1965 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", 1966 upper_32_bits(addr), lower_32_bits(addr)); 1967 if (GRAPHICS_VER(dev_priv) >= 8) 1968 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); 1969 else if (GRAPHICS_VER(dev_priv) >= 4) 1970 addr = ENGINE_READ(engine, RING_DMA_FADD); 1971 else 1972 addr = ENGINE_READ(engine, DMA_FADD_I8XX); 1973 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", 1974 upper_32_bits(addr), lower_32_bits(addr)); 1975 if (GRAPHICS_VER(dev_priv) >= 4) { 1976 drm_printf(m, "\tIPEIR: 0x%08x\n", 1977 ENGINE_READ(engine, RING_IPEIR)); 1978 drm_printf(m, "\tIPEHR: 0x%08x\n", 1979 ENGINE_READ(engine, RING_IPEHR)); 1980 } else { 1981 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); 1982 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); 1983 } 1984 1985 if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) { 1986 struct i915_request * const *port, *rq; 1987 const u32 *hws = 1988 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 1989 const u8 num_entries = execlists->csb_size; 1990 unsigned int idx; 1991 u8 read, write; 1992 1993 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n", 1994 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)), 1995 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)), 1996 repr_timer(&engine->execlists.preempt), 1997 repr_timer(&engine->execlists.timer)); 1998 1999 read = execlists->csb_head; 2000 write = READ_ONCE(*execlists->csb_write); 2001 2002 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n", 2003 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), 2004 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), 2005 read, write, num_entries); 2006 2007 if (read >= num_entries) 2008 read = 0; 2009 if (write >= num_entries) 2010 write = 0; 2011 if (read > write) 2012 write += num_entries; 2013 while (read < write) { 2014 idx = ++read % num_entries; 2015 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", 2016 idx, hws[idx * 2], hws[idx * 2 + 1]); 2017 } 2018 2019 i915_sched_engine_active_lock_bh(engine->sched_engine); 2020 rcu_read_lock(); 2021 for (port = execlists->active; (rq = *port); port++) { 2022 char hdr[160]; 2023 int len; 2024 2025 len = scnprintf(hdr, sizeof(hdr), 2026 "\t\tActive[%d]: ccid:%08x%s%s, ", 2027 (int)(port - execlists->active), 2028 rq->context->lrc.ccid, 2029 intel_context_is_closed(rq->context) ? "!" : "", 2030 intel_context_is_banned(rq->context) ? 
"*" : ""); 2031 len += print_ring(hdr + len, sizeof(hdr) - len, rq); 2032 scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); 2033 i915_request_show(m, rq, hdr, 0); 2034 } 2035 for (port = execlists->pending; (rq = *port); port++) { 2036 char hdr[160]; 2037 int len; 2038 2039 len = scnprintf(hdr, sizeof(hdr), 2040 "\t\tPending[%d]: ccid:%08x%s%s, ", 2041 (int)(port - execlists->pending), 2042 rq->context->lrc.ccid, 2043 intel_context_is_closed(rq->context) ? "!" : "", 2044 intel_context_is_banned(rq->context) ? "*" : ""); 2045 len += print_ring(hdr + len, sizeof(hdr) - len, rq); 2046 scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); 2047 i915_request_show(m, rq, hdr, 0); 2048 } 2049 rcu_read_unlock(); 2050 i915_sched_engine_active_unlock_bh(engine->sched_engine); 2051 } else if (GRAPHICS_VER(dev_priv) > 6) { 2052 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 2053 ENGINE_READ(engine, RING_PP_DIR_BASE)); 2054 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", 2055 ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); 2056 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 2057 ENGINE_READ(engine, RING_PP_DIR_DCLV)); 2058 } 2059 } 2060 2061 static void print_request_ring(struct drm_printer *m, struct i915_request *rq) 2062 { 2063 struct i915_vma_resource *vma_res = rq->batch_res; 2064 void *ring; 2065 int size; 2066 2067 drm_printf(m, 2068 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", 2069 rq->head, rq->postfix, rq->tail, 2070 vma_res ? upper_32_bits(vma_res->start) : ~0u, 2071 vma_res ? lower_32_bits(vma_res->start) : ~0u); 2072 2073 size = rq->tail - rq->head; 2074 if (rq->tail < rq->head) 2075 size += rq->ring->size; 2076 2077 ring = kmalloc(size, GFP_ATOMIC); 2078 if (ring) { 2079 const void *vaddr = rq->ring->vaddr; 2080 unsigned int head = rq->head; 2081 unsigned int len = 0; 2082 2083 if (rq->tail < head) { 2084 len = rq->ring->size - head; 2085 memcpy(ring, vaddr + head, len); 2086 head = 0; 2087 } 2088 memcpy(ring + len, vaddr + head, size - len); 2089 2090 hexdump(m, ring, size); 2091 kfree(ring); 2092 } 2093 } 2094 2095 static unsigned long list_count(struct list_head *list) 2096 { 2097 struct list_head *pos; 2098 unsigned long count = 0; 2099 2100 list_for_each(pos, list) 2101 count++; 2102 2103 return count; 2104 } 2105 2106 static unsigned long read_ul(void *p, size_t x) 2107 { 2108 return *(unsigned long *)(p + x); 2109 } 2110 2111 static void print_properties(struct intel_engine_cs *engine, 2112 struct drm_printer *m) 2113 { 2114 static const struct pmap { 2115 size_t offset; 2116 const char *name; 2117 } props[] = { 2118 #define P(x) { \ 2119 .offset = offsetof(typeof(engine->props), x), \ 2120 .name = #x \ 2121 } 2122 P(heartbeat_interval_ms), 2123 P(max_busywait_duration_ns), 2124 P(preempt_timeout_ms), 2125 P(stop_timeout_ms), 2126 P(timeslice_duration_ms), 2127 2128 {}, 2129 #undef P 2130 }; 2131 const struct pmap *p; 2132 2133 drm_printf(m, "\tProperties:\n"); 2134 for (p = props; p->name; p++) 2135 drm_printf(m, "\t\t%s: %lu [default %lu]\n", 2136 p->name, 2137 read_ul(&engine->props, p->offset), 2138 read_ul(&engine->defaults, p->offset)); 2139 } 2140 2141 static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg) 2142 { 2143 struct intel_timeline *tl = get_timeline(rq); 2144 2145 i915_request_show(m, rq, msg, 0); 2146 2147 drm_printf(m, "\t\tring->start: 0x%08x\n", 2148 i915_ggtt_offset(rq->ring->vma)); 2149 drm_printf(m, "\t\tring->head: 0x%08x\n", 2150 rq->ring->head); 2151 drm_printf(m, "\t\tring->tail: 0x%08x\n", 2152 rq->ring->tail); 2153 
drm_printf(m, "\t\tring->emit: 0x%08x\n", 2154 rq->ring->emit); 2155 drm_printf(m, "\t\tring->space: 0x%08x\n", 2156 rq->ring->space); 2157 2158 if (tl) { 2159 drm_printf(m, "\t\tring->hwsp: 0x%08x\n", 2160 tl->hwsp_offset); 2161 intel_timeline_put(tl); 2162 } 2163 2164 print_request_ring(m, rq); 2165 2166 if (rq->context->lrc_reg_state) { 2167 drm_printf(m, "Logical Ring Context:\n"); 2168 hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE); 2169 } 2170 } 2171 2172 void intel_engine_dump_active_requests(struct list_head *requests, 2173 struct i915_request *hung_rq, 2174 struct drm_printer *m) 2175 { 2176 struct i915_request *rq; 2177 const char *msg; 2178 enum i915_request_state state; 2179 2180 list_for_each_entry(rq, requests, sched.link) { 2181 if (rq == hung_rq) 2182 continue; 2183 2184 state = i915_test_request_state(rq); 2185 if (state < I915_REQUEST_QUEUED) 2186 continue; 2187 2188 if (state == I915_REQUEST_ACTIVE) 2189 msg = "\t\tactive on engine"; 2190 else 2191 msg = "\t\tactive in queue"; 2192 2193 engine_dump_request(rq, m, msg); 2194 } 2195 } 2196 2197 static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m) 2198 { 2199 struct i915_request *hung_rq = NULL; 2200 struct intel_context *ce; 2201 bool guc; 2202 2203 /* 2204 * No need for an engine->irq_seqno_barrier() before the seqno reads. 2205 * The GPU is still running so requests are still executing and any 2206 * hardware reads will be out of date by the time they are reported. 2207 * But the intention here is just to report an instantaneous snapshot 2208 * so that's fine. 2209 */ 2210 lockdep_assert_held(&engine->sched_engine->lock); 2211 2212 drm_printf(m, "\tRequests:\n"); 2213 2214 guc = intel_uc_uses_guc_submission(&engine->gt->uc); 2215 if (guc) { 2216 ce = intel_engine_get_hung_context(engine); 2217 if (ce) 2218 hung_rq = intel_context_find_active_request(ce); 2219 } else { 2220 hung_rq = intel_engine_execlist_find_hung_request(engine); 2221 } 2222 2223 if (hung_rq) 2224 engine_dump_request(hung_rq, m, "\t\thung"); 2225 2226 if (guc) 2227 intel_guc_dump_active_requests(engine, hung_rq, m); 2228 else 2229 intel_engine_dump_active_requests(&engine->sched_engine->requests, 2230 hung_rq, m); 2231 } 2232 2233 void intel_engine_dump(struct intel_engine_cs *engine, 2234 struct drm_printer *m, 2235 const char *header, ...) 2236 { 2237 struct i915_gpu_error * const error = &engine->i915->gpu_error; 2238 struct i915_request *rq; 2239 intel_wakeref_t wakeref; 2240 unsigned long flags; 2241 ktime_t dummy; 2242 2243 if (header) { 2244 va_list ap; 2245 2246 va_start(ap, header); 2247 drm_vprintf(m, header, &ap); 2248 va_end(ap); 2249 } 2250 2251 if (intel_gt_is_wedged(engine->gt)) 2252 drm_printf(m, "*** WEDGED ***\n"); 2253 2254 drm_printf(m, "\tAwake? 
%d\n", atomic_read(&engine->wakeref.count)); 2255 drm_printf(m, "\tBarriers?: %s\n", 2256 str_yes_no(!llist_empty(&engine->barrier_tasks))); 2257 drm_printf(m, "\tLatency: %luus\n", 2258 ewma__engine_latency_read(&engine->latency)); 2259 if (intel_engine_supports_stats(engine)) 2260 drm_printf(m, "\tRuntime: %llums\n", 2261 ktime_to_ms(intel_engine_get_busy_time(engine, 2262 &dummy))); 2263 drm_printf(m, "\tForcewake: %x domains, %d active\n", 2264 engine->fw_domain, READ_ONCE(engine->fw_active)); 2265 2266 rcu_read_lock(); 2267 rq = READ_ONCE(engine->heartbeat.systole); 2268 if (rq) 2269 drm_printf(m, "\tHeartbeat: %d ms ago\n", 2270 jiffies_to_msecs(jiffies - rq->emitted_jiffies)); 2271 rcu_read_unlock(); 2272 drm_printf(m, "\tReset count: %d (global %d)\n", 2273 i915_reset_engine_count(error, engine), 2274 i915_reset_count(error)); 2275 print_properties(engine, m); 2276 2277 spin_lock_irqsave(&engine->sched_engine->lock, flags); 2278 engine_dump_active_requests(engine, m); 2279 2280 drm_printf(m, "\tOn hold?: %lu\n", 2281 list_count(&engine->sched_engine->hold)); 2282 spin_unlock_irqrestore(&engine->sched_engine->lock, flags); 2283 2284 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base); 2285 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm); 2286 if (wakeref) { 2287 intel_engine_print_registers(engine, m); 2288 intel_runtime_pm_put(engine->uncore->rpm, wakeref); 2289 } else { 2290 drm_printf(m, "\tDevice is asleep; skipping register dump\n"); 2291 } 2292 2293 intel_execlists_show_requests(engine, m, i915_request_show, 8); 2294 2295 drm_printf(m, "HWSP:\n"); 2296 hexdump(m, engine->status_page.addr, PAGE_SIZE); 2297 2298 drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine))); 2299 2300 intel_engine_print_breadcrumbs(engine, m); 2301 } 2302 2303 /** 2304 * intel_engine_get_busy_time() - Return current accumulated engine busyness 2305 * @engine: engine to report on 2306 * @now: monotonic timestamp of sampling 2307 * 2308 * Returns accumulated time @engine was busy since engine stats were enabled. 2309 */ 2310 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) 2311 { 2312 return engine->busyness(engine, now); 2313 } 2314 2315 struct intel_context * 2316 intel_engine_create_virtual(struct intel_engine_cs **siblings, 2317 unsigned int count, unsigned long flags) 2318 { 2319 if (count == 0) 2320 return ERR_PTR(-EINVAL); 2321 2322 if (count == 1 && !(flags & FORCE_VIRTUAL)) 2323 return intel_context_create(siblings[0]); 2324 2325 GEM_BUG_ON(!siblings[0]->cops->create_virtual); 2326 return siblings[0]->cops->create_virtual(siblings, count, flags); 2327 } 2328 2329 struct i915_request * 2330 intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine) 2331 { 2332 struct i915_request *request, *active = NULL; 2333 2334 /* 2335 * This search does not work in GuC submission mode. However, the GuC 2336 * will report the hanging context directly to the driver itself. So 2337 * the driver should never get here when in GuC mode. 2338 */ 2339 GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc)); 2340 2341 /* 2342 * We are called by the error capture, reset and to dump engine 2343 * state at random points in time. In particular, note that neither is 2344 * crucially ordered with an interrupt. After a hang, the GPU is dead 2345 * and we assume that no more writes can happen (we waited long enough 2346 * for all writes that were in transaction to be flushed) - adding an 2347 * extra delay for a recent interrupt is pointless. 

struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
			    unsigned int count, unsigned long flags)
{
	if (count == 0)
		return ERR_PTR(-EINVAL);

	if (count == 1 && !(flags & FORCE_VIRTUAL))
		return intel_context_create(siblings[0]);

	GEM_BUG_ON(!siblings[0]->cops->create_virtual);
	return siblings[0]->cops->create_virtual(siblings, count, flags);
}

struct i915_request *
intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * This search does not work in GuC submission mode. However, the GuC
	 * will report the hanging context directly to the driver itself. So
	 * the driver should never get here when in GuC mode.
	 */
	GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));

	/*
	 * We are called by error capture, reset and engine-state dumping at
	 * random points in time. In particular, note that none of these
	 * callers is crucially ordered with an interrupt. After a hang, the
	 * GPU is dead and we assume that no more writes can happen (we waited
	 * long enough for all writes that were in flight to be flushed) -
	 * adding an extra delay for a recent interrupt is pointless. Hence, we
	 * do not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	rcu_read_lock();
	request = execlists_active(&engine->execlists);
	if (request) {
		struct intel_timeline *tl = request->context->timeline;

		list_for_each_entry_from_reverse(request, &tl->requests, link) {
			if (__i915_request_is_complete(request))
				break;

			active = request;
		}
	}
	rcu_read_unlock();
	if (active)
		return active;

	list_for_each_entry(request, &engine->sched_engine->requests,
			    sched.link) {
		if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
			continue;

		active = request;
		break;
	}

	return active;
}

void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
	/*
	 * If there are any non-fused-off CCS engines, we need to enable CCS
	 * support in the RCU_MODE register. This only needs to be done once,
	 * so for simplicity we'll take care of this in the RCS engine's
	 * resume handler; since the RCS and all CCS engines belong to the
	 * same reset domain and are reset together, this will also take care
	 * of re-applying the setting after i915-triggered resets.
	 */
	if (!CCS_MASK(engine->gt))
		return;

	intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
			   _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif