// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: GuC-based command submission
 *
 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
 * firmware is moving to an updated submission interface and we plan to
 * turn submission back on when that lands. The documentation below (and the
 * related code) matches the old submission model and will be updated as part
 * of the upgrade to the new flow.
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors and shares them with the GuC. Currently, we only use one
 * descriptor. This stage descriptor lets the GuC know about the workqueue and
 * process descriptor. Theoretically, it also lets the GuC know about our HW
 * contexts (context ID, etc...), but we actually employ a kind of submission
 * where the GuC uses the LRCA sent via the work item instead. This is called
 * a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any data.
 * It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). The firmware writes a success/fail code back to the action
 * register after it processes the request. The kernel driver polls waiting
 * for this update and then proceeds; an illustrative sketch of this handshake
 * follows the workqueue helpers below.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail pointer
 * and an ELSP context descriptor dword into the work item; see
 * guc_add_request().
 */

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
{
	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;

	return &base[id];
}

static int guc_workqueue_create(struct intel_guc *guc)
{
	return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue,
					      &guc->workqueue_vaddr);
}

static void guc_workqueue_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP);
}
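/*
 * Illustrative sketch (editorial, not built) of the scratch-register
 * handshake described in the DOC section above, assuming the SOFT_SCRATCH()
 * and GUC_SEND_INTERRUPT/GUC_SEND_TRIGGER definitions from intel_guc_reg.h;
 * the production path is intel_guc_send_mmio():
 *
 *	for (i = 0; i < len; i++)
 *		intel_uncore_write(uncore, SOFT_SCRATCH(i), request[i]);
 *
 *	intel_uncore_write(uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
 *
 *	// ...then poll SOFT_SCRATCH(0) until the firmware overwrites the
 *	// action code with a success/fail response.
 */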
/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static int guc_proc_desc_create(struct intel_guc *guc)
{
	const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc));

	return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc,
					      &guc->proc_desc_vaddr);
}

static void guc_proc_desc_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP);
}

static void guc_proc_desc_init(struct intel_guc *guc)
{
	struct guc_process_desc *desc;

	desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
}

static void guc_proc_desc_fini(struct intel_guc *guc)
{
	memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
			      GUC_MAX_STAGE_DESCRIPTORS);

	return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
					      &guc->stage_desc_pool_vaddr);
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures related to work submission (process descriptor, work queue,
 * etc).
 */
static void guc_stage_desc_init(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	/* we only use 1 stage desc, so hardcode it to 0 */
	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;

	desc->stage_id = 0;
	desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;

	desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc);
	desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue);
	desc->wq_size = GUC_WQ_SIZE;
}

static void guc_stage_desc_fini(struct intel_guc *guc)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(guc, 0);
	memset(desc, 0, sizeof(*desc));
}
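/*
 * Editorial note: layout of the 4-dword work item built below (see
 * struct guc_wq_item in intel_guc_fwif.h); the per-field annotations are
 * ours, not firmware documentation:
 *
 *	dw0: header - type, length, target engine and flush flags
 *	dw1: context descriptor - lower 32 bits of the LRCA
 *	dw2: submit element info - ring tail, expressed in qwords
 *	dw3: fence id - we use the request seqno
 */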
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc *guc,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = guc->proc_desc_vaddr;
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&guc->wq_lock);

	/*
	 * For now the workqueue item is 4 DWords and the workqueue buffer is
	 * 2 pages, so a work item can neither straddle a page boundary nor
	 * wrap around past the beginning of the buffer. This simplifies the
	 * implementation below.
	 *
	 * XXX: if that ever changes, we would need to build the work item in
	 * a temporary buffer and copy it into the workqueue dword by dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* We expect the WQ to be active if we're appending items to it */
	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

	/*
	 * Free space is guaranteed; see the sizing BUILD_BUG_ON in
	 * intel_guc_submission_enable().
	 */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
			      GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	wqi = guc->workqueue_vaddr + wq_off;

	/* Now fill in the 4-word work queue item */
	wqi->header = WQ_TYPE_INORDER |
		      (wqi_len << WQ_LEN_SHIFT) |
		      (target_engine << WQ_TARGET_SHIFT) |
		      WQ_NO_WCFLUSH_WAIT;
	wqi->context_desc = context_desc;
	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
	GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
	wqi->fence_id = fence_id;

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}

static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
	/* The work item carries the ring tail in qwords, not bytes */
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(guc, engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
}

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf are not
 * guaranteed to be ordered that way, so to ensure the flush we issue a
 * POSTING_READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(vma->vm->gt->uncore,
					     GUC_STATUS);
}

static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;

	spin_lock(&guc->wq_lock);

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);

	spin_unlock(&guc->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority | __NO_PREEMPTION;
}
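/*
 * Editorial note: schedule_in() and schedule_out() below bracket a request's
 * residency in the execlists->inflight array. We take a GT wakeref and a
 * request reference as the request enters the array, and drop both in
 * schedule_out() once the submission tasklet pops the completed request.
 */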
static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment, a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	__intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put_async(rq->engine->gt);
	i915_request_put(rq);
}

static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			/* Coalesce requests from the same context into one port */
			if (last && rq->context != last->context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Pop completed requests off the front of the inflight array... */
	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
	/* ...and compact the remainder down to the start. */
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;

		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
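/*
 * Worked example (editorial) of the compaction in guc_submission_tasklet():
 * given a hypothetical inflight array [rq0, rq1, NULL] where rq0 has
 * completed, the tasklet schedules out rq0 and memmoves the remainder down
 * to [rq1, NULL, NULL], leaving the tail slots free for __guc_dequeue() to
 * refill.
 */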
static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* Note we are only using the inflight and not the pending queue */

	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	/* A request that never started executing cannot have caused the hang */
	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	intel_lr_context_reset(engine, rq->context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}
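/*
 * Editorial note on the reset flow: guc_reset_rewind() above pushes back
 * innocent, incomplete requests for replay after a per-engine reset, whereas
 * guc_reset_cancel() below is the terminal path (e.g. when wedging the GPU)
 * that completes every outstanding request with -EIO instead.
 */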
static void guc_reset_cancel(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	ENGINE_TRACE(engine, "\n");

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	ENGINE_TRACE(engine, "depth->%d\n",
		     atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	ret = guc_workqueue_create(guc);
	if (ret)
		goto err_pool;

	ret = guc_proc_desc_create(guc);
	if (ret)
		goto err_workqueue;

	spin_lock_init(&guc->wq_lock);

	return 0;

err_workqueue:
	guc_workqueue_destroy(guc);
err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	if (guc->stage_desc_pool) {
		guc_proc_desc_destroy(guc);
		guc_workqueue_destroy(guc);
		guc_stage_desc_pool_destroy(guc);
	}
}

static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Don't handle the ctx switch interrupt in GuC submission mode */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask, 0);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask, 0);
}

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_CONTEXT_SWITCH_INTERRUPT;
	u32 dmask = irqs << 16 | irqs;

	GEM_BUG_ON(INTEL_GEN(gt->i915) < 11);

	/* Handle ctx switch interrupts again */
	intel_uncore_rmw(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0, dmask);
	intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask);
}
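/*
 * Editorial note: on Gen11+ each of the interrupt-enable registers above
 * carries the masks for two engine classes, one per 16-bit half, which is
 * why dmask replicates the GT_CONTEXT_SWITCH_INTERRUPT bits into both
 * halves (irqs << 16 | irqs).
 */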
static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->cancel_requests = execlists_cancel_requests;
	 *    engine->schedule = execlists_schedule;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	/* do not use execlists park/unpark */
	engine->park = engine->unpark = NULL;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.rewind = guc_reset_rewind;
	engine->reset.cancel = guc_reset_cancel;
	engine->reset.finish = guc_reset_finish;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support for
	 * GuC submission we don't allow disabling the interrupts at runtime,
	 * so we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

void intel_guc_submission_enable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to execlist_port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware controls the HEAD of the work queue,
	 * and it is guaranteed that it will remove the work item from the
	 * queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	guc_proc_desc_init(guc);
	guc_stage_desc_init(guc);

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(gt);

	for_each_engine(engine, gt, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	/* Note: By the time we're here, GuC may have already been reset */

	guc_interrupts_release(gt);

	guc_stage_desc_fini(guc);
	guc_proc_desc_fini(guc);
}

static bool __guc_submission_support(struct intel_guc *guc)
{
	/* XXX: GuC submission is unavailable for now */
	return false;

	if (!intel_guc_is_supported(guc))
		return false;

	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_supported = __guc_submission_support(guc);
}

bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
{
	return engine->set_default_submission == guc_set_default_submission;
}