// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include <linux/circ_buf.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "intel_guc_submission.h"

#include "i915_drv.h"
#include "i915_trace.h"

enum {
	GUC_PREEMPT_NONE = 0,
	GUC_PREEMPT_INPROGRESS,
	GUC_PREEMPT_FINISHED,
};
#define GUC_PREEMPT_BREADCRUMB_DWORDS	0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES	\
	(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)

/**
 * DOC: GuC-based command submission
 *
 * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
 * firmware is moving to an updated submission interface and we plan to
 * turn submission back on when that lands. The below documentation (and
 * related code) matches the old submission model and will be updated as part
 * of the upgrade to the new flow.
 *
 * GuC client:
 * An intel_guc_client refers to a submission path through GuC. Currently,
 * there is only one client, which is charged with all submissions to the
 * GuC. This struct is the owner of a doorbell, a process descriptor and a
 * workqueue (all of them inside a single gem object that contains all
 * required pages for these elements).
 *
 * GuC stage descriptor:
 * During initialization, the driver allocates a static pool of 1024 such
 * descriptors, and shares them with the GuC.
 * Currently, there exists a 1:1 mapping between an intel_guc_client and a
 * guc_stage_desc (via the client's stage_id), so effectively only one
 * gets used. This stage descriptor lets the GuC know about the doorbell,
 * workqueue and process descriptor. Theoretically, it also lets the GuC
 * know about our HW contexts (context ID, etc...), but we actually
 * employ a kind of submission where the GuC uses the LRCA sent via the work
 * item instead (the single guc_stage_desc associated to the execbuf client
 * contains information about the default kernel context only, but this is
 * essentially unused). This is called a "proxy" submission.
 *
 * The Scratch registers:
 * There are 16 MMIO-based registers starting at 0xC180. The kernel driver
 * writes a value to the action register (SOFT_SCRATCH_0) along with any
 * data. It then triggers an interrupt on the GuC via another register write
 * (0xC4C8). Firmware writes a success/fail code back to the action register
 * after it processes the request. The kernel driver polls waiting for this
 * update and then proceeds.
 * See intel_guc_send()
 *
 * Doorbells:
 * Doorbells are interrupts to the uKernel. A doorbell is a single cache
 * line (QW) mapped into process space.
 *
 * Work Items:
 * There are several types of work items that the host may place into a
 * workqueue, each with its own requirements and limitations. Currently only
 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
 * represents an in-order queue. The kernel driver packs the ring tail
 * pointer and an ELSP context descriptor dword into the Work Item.
 * See guc_add_request()
 */
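
/*
 * For orientation, the submission fast path through this file is roughly
 * (this is only a summary of the functions below, not a separate interface):
 *
 *	guc_submission_tasklet()
 *	    __guc_dequeue()          - pull requests from the priority tree
 *	        guc_submit()
 *	            flush_ggtt_writes()
 *	            guc_add_request()
 *	                guc_wq_item_append() - write a work item into the WQ
 *	                guc_ring_doorbell()  - poke the doorbell cacheline
 */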

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline bool is_high_priority(struct intel_guc_client *client)
{
	return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
		client->priority == GUC_CLIENT_PRIORITY_HIGH);
}

static int reserve_doorbell(struct intel_guc_client *client)
{
	unsigned long offset;
	unsigned long end;
	u16 id;

	GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);

	/*
	 * The bitmap tracks which doorbell registers are currently in use.
	 * It is split into two halves; the first half is used for normal
	 * priority contexts, the second half for high-priority ones.
	 */
	offset = 0;
	end = GUC_NUM_DOORBELLS / 2;
	if (is_high_priority(client)) {
		offset = end;
		end += offset;
	}

	id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
	if (id == end)
		return -ENOSPC;

	__set_bit(id, client->guc->doorbell_bitmap);
	client->doorbell_id = id;
	DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
			 client->stage_id, yesno(is_high_priority(client)),
			 id);
	return 0;
}

static bool has_doorbell(struct intel_guc_client *client)
{
	if (client->doorbell_id == GUC_DOORBELL_INVALID)
		return false;

	return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}

static void unreserve_doorbell(struct intel_guc_client *client)
{
	GEM_BUG_ON(!has_doorbell(client));

	__clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
	client->doorbell_id = GUC_DOORBELL_INVALID;
}

/*
 * Tell the GuC to allocate or deallocate a specific doorbell
 */

static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
	u32 action[] = {
		INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
		stage_id
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
{
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}

/*
 * Initialise, update, or clear doorbell data shared with the GuC
 *
 * These functions modify shared data and so need access to the mapped
 * client object which contains the page being used for the doorbell
 */

static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
{
	struct guc_stage_desc *desc;

	/* Update the GuC's idea of the doorbell ID */
	desc = __get_stage_desc(client);
	desc->db_id = new_id;
}

static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
{
	return client->vaddr + client->doorbell_offset;
}

static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;

	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
	return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}
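
/*
 * Doorbell setup and teardown, summarising create_doorbell() and
 * destroy_doorbell() below: the driver first reserves a doorbell ID in its
 * own bitmap, then publishes that ID via __update_doorbell_desc() and marks
 * the cacheline enabled in __init_doorbell(), and only then asks the
 * firmware to install the doorbell with __guc_allocate_doorbell().
 * Teardown runs in the reverse order, waiting for the hardware valid bit
 * to clear before the firmware is told to release the doorbell.
 */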

static void __init_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_ENABLED;
	doorbell->cookie = 0;
}

static void __fini_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *doorbell;
	u16 db_id = client->doorbell_id;

	doorbell = __get_doorbell(client);
	doorbell->db_status = GUC_DOORBELL_DISABLED;

	/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
	 * to go to zero after updating db_status before we call the GuC to
	 * release the doorbell
	 */
	if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
		WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}

static int create_doorbell(struct intel_guc_client *client)
{
	int ret;

	if (WARN_ON(!has_doorbell(client)))
		return -ENODEV; /* internal setup error, should never happen */

	__update_doorbell_desc(client, client->doorbell_id);
	__init_doorbell(client);

	ret = __guc_allocate_doorbell(client->guc, client->stage_id);
	if (ret) {
		__fini_doorbell(client);
		__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
		DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
				 client->stage_id, ret);
		return ret;
	}

	return 0;
}

static int destroy_doorbell(struct intel_guc_client *client)
{
	int ret;

	GEM_BUG_ON(!has_doorbell(client));

	__fini_doorbell(client);
	ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
	if (ret)
		DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
			  client->stage_id, ret);

	__update_doorbell_desc(client, GUC_DOORBELL_INVALID);

	return ret;
}

static unsigned long __select_cacheline(struct intel_guc *guc)
{
	unsigned long offset;

	/* Doorbell uses a single cache line within a page */
	offset = offset_in_page(guc->db_cacheline);

	/* Moving to next cache line to reduce contention */
	guc->db_cacheline += cache_line_size();

	DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
			 offset, guc->db_cacheline, cache_line_size());
	return offset;
}

static inline struct guc_process_desc *
__get_process_desc(struct intel_guc_client *client)
{
	return client->vaddr + client->proc_desc_offset;
}
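
/*
 * Note that the process descriptor is not just static configuration: it
 * also carries the workqueue head and tail shared between the driver and
 * the firmware. guc_wq_item_append() below reads desc->head and advances
 * desc->tail, while the GuC consumes items and moves desc->head forward.
 */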

/*
 * Initialise the process descriptor shared with the GuC firmware.
 */
static void guc_proc_desc_init(struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = memset(__get_process_desc(client), 0, sizeof(*desc));

	/*
	 * XXX: pDoorbell and WQVBaseAddress are pointers in process address
	 * space for ring3 clients (set them as in mmap_ioctl) or kernel
	 * space for kernel clients (map on demand instead? May make debug
	 * easier to have it mapped).
	 */
	desc->wq_base_addr = 0;
	desc->db_base_addr = 0;

	desc->stage_id = client->stage_id;
	desc->wq_size_bytes = GUC_WQ_SIZE;
	desc->wq_status = WQ_STATUS_ACTIVE;
	desc->priority = client->priority;
}

static void guc_proc_desc_fini(struct intel_guc_client *client)
{
	struct guc_process_desc *desc;

	desc = __get_process_desc(client);
	memset(desc, 0, sizeof(*desc));
}

static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc,
				     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
						GUC_MAX_STAGE_DESCRIPTORS));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->stage_desc_pool = vma;
	guc->stage_desc_pool_vaddr = vaddr;
	ida_init(&guc->stage_ids);

	return 0;
}

static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
{
	ida_destroy(&guc->stage_ids);
	i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
}

/*
 * Initialise/clear the stage descriptor shared with the GuC firmware.
 *
 * This descriptor tells the GuC where (in GGTT space) to find the important
 * data structures relating to this client (doorbell, process descriptor,
 * write queue, etc).
 */
static void guc_stage_desc_init(struct intel_guc_client *client)
{
	struct intel_guc *guc = client->guc;
	struct guc_stage_desc *desc;
	u32 gfx_addr;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));

	desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
			  GUC_STAGE_DESC_ATTR_KERNEL;
	if (is_high_priority(client))
		desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
	desc->stage_id = client->stage_id;
	desc->priority = client->priority;
	desc->db_id = client->doorbell_id;

	/*
	 * The doorbell, process descriptor, and workqueue are all parts
	 * of the client object, which the GuC will reference via the GGTT
	 */
	gfx_addr = intel_guc_ggtt_offset(guc, client->vma);
	desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
				client->doorbell_offset;
	desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
	desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
	desc->process_desc = gfx_addr + client->proc_desc_offset;
	desc->wq_addr = gfx_addr + GUC_DB_SIZE;
	desc->wq_size = GUC_WQ_SIZE;

	desc->desc_private = ptr_to_u64(client);
}

static void guc_stage_desc_fini(struct intel_guc_client *client)
{
	struct guc_stage_desc *desc;

	desc = __get_stage_desc(client);
	memset(desc, 0, sizeof(*desc));
}
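
/*
 * For reference, the layout of the client object whose offsets the stage
 * descriptor above publishes (see also guc_client_alloc()):
 *
 *	+----------------------------+ <- 0
 *	| doorbell cacheline         |    doorbell_offset (one cacheline;
 *	| process descriptor         |    proc_desc_offset lives in the
 *	|                            |    other half of the same page)
 *	+----------------------------+ <- GUC_DB_SIZE (wq_addr)
 *	| work queue                 |
 *	|                            |
 *	+----------------------------+ <- GUC_DB_SIZE + GUC_WQ_SIZE
 */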

/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
			       u32 target_engine, u32 context_desc,
			       u32 ring_tail, u32 fence_id)
{
	/* wqi_len is in DWords, and does not include the one-word header */
	const size_t wqi_size = sizeof(struct guc_wq_item);
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;
	struct guc_process_desc *desc = __get_process_desc(client);
	struct guc_wq_item *wqi;
	u32 wq_off;

	lockdep_assert_held(&client->wq_lock);

	/* For now the workqueue item is 4 DWs and the workqueue buffer is
	 * 2 pages, so a wqi can neither straddle a page boundary nor wrap
	 * around to the beginning. This simplifies the implementation below.
	 *
	 * XXX: if that ever changes, we would need to build the wqi in a
	 * temporary buffer and copy it into the workqueue dword by dword.
	 */
	BUILD_BUG_ON(wqi_size != 16);

	/* We expect the WQ to be active if we're appending items to it */
	GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE);

	/* Free space is guaranteed. */
	wq_off = READ_ONCE(desc->tail);
	GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
			      GUC_WQ_SIZE) < wqi_size);
	GEM_BUG_ON(wq_off & (wqi_size - 1));

	/* WQ starts from the page after doorbell / process_desc */
	wqi = client->vaddr + wq_off + GUC_DB_SIZE;

	if (I915_SELFTEST_ONLY(client->use_nop_wqi)) {
		wqi->header = WQ_TYPE_NOOP | (wqi_len << WQ_LEN_SHIFT);
	} else {
		/* Now fill in the 4-word work queue item */
		wqi->header = WQ_TYPE_INORDER |
			      (wqi_len << WQ_LEN_SHIFT) |
			      (target_engine << WQ_TARGET_SHIFT) |
			      WQ_NO_WCFLUSH_WAIT;
		wqi->context_desc = context_desc;
		wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
		GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
		wqi->fence_id = fence_id;
	}

	/* Make the update visible to GuC */
	WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}

static void guc_ring_doorbell(struct intel_guc_client *client)
{
	struct guc_doorbell_info *db;
	u32 cookie;

	lockdep_assert_held(&client->wq_lock);

	/* pointer of current doorbell cacheline */
	db = __get_doorbell(client);

	/*
	 * We're not expecting the doorbell cookie to change behind our back;
	 * we also need to treat 0 as a reserved value.
	 */
	cookie = READ_ONCE(db->cookie);
	WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);

	/* XXX: doorbell was lost and need to acquire it again */
	GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}

static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_guc_client *client = guc->execbuf_client;
	struct intel_engine_cs *engine = rq->engine;
	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(client, engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
	guc_ring_doorbell(client);
}
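
/*
 * To illustrate the packing done above (the field values shown are the ones
 * guc_add_request() feeds to guc_wq_item_append(); this only restates the
 * code, it is not an additional format):
 *
 *	wqi->header              = WQ_TYPE_INORDER | (3 << WQ_LEN_SHIFT) |
 *				   (engine->guc_id << WQ_TARGET_SHIFT) |
 *				   WQ_NO_WCFLUSH_WAIT;
 *	wqi->context_desc        = lower_32_bits(rq->hw_context->lrc_desc);
 *	wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
 *	wqi->fence_id            = rq->fence.seqno;
 *
 * ring_tail is expressed in qwords, hence the division by sizeof(u64) in
 * guc_add_request().
 */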

/*
 * When we're doing submissions using the regular execlists backend, writing
 * to ELSP from the CPU side is enough to make sure that writes to ringbuffer
 * pages pinned in the mappable aperture portion of the GGTT are visible to
 * the command streamer. Writes done by the GuC on our behalf do not
 * guarantee such ordering; therefore, to ensure the writes are flushed, we
 * issue a POSTING READ.
 */
static void flush_ggtt_writes(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (i915_vma_is_map_and_fenceable(vma))
		intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}

static void guc_submit(struct intel_engine_cs *engine,
		       struct i915_request **out,
		       struct i915_request **end)
{
	struct intel_guc *guc = &engine->gt->uc.guc;
	struct intel_guc_client *client = guc->execbuf_client;

	spin_lock(&client->wq_lock);

	do {
		struct i915_request *rq = *out++;

		flush_ggtt_writes(rq->ring->vma);
		guc_add_request(guc, rq);
	} while (out != end);

	spin_unlock(&client->wq_lock);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return rq->sched.attr.priority | __NO_PREEMPTION;
}

static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
	trace_i915_request_in(rq, idx);

	/*
	 * Currently we are not tracking the rq->context being inflight
	 * (ce->inflight = rq->engine). It is only used by the execlists
	 * backend at the moment, a similar counting strategy would be
	 * required if we generalise the inflight tracking.
	 */

	intel_gt_pm_get(rq->engine->gt);
	return i915_request_get(rq);
}

static void schedule_out(struct i915_request *rq)
{
	trace_i915_request_out(rq);

	intel_gt_pm_put(rq->engine->gt);
	i915_request_put(rq);
}
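
/*
 * Move requests from the priority tree into the inflight ports. Requests
 * from the same context are coalesced: only a switch to a different context
 * consumes another port (see also the workqueue sizing comment in
 * intel_guc_submission_enable()).
 */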
static void __guc_dequeue(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **first = execlists->inflight;
	struct i915_request ** const last_port = first + execlists->port_mask;
	struct i915_request *last = first[0];
	struct i915_request **port;
	bool submit = false;
	struct rb_node *rb;

	lockdep_assert_held(&engine->active.lock);

	if (last) {
		if (*++first)
			return;

		last = NULL;
	}

	/*
	 * We write directly into the execlists->inflight queue and don't use
	 * the execlists->pending queue, as we don't have a distinct switch
	 * event.
	 */
	port = first;
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			if (last && rq->hw_context != last->hw_context) {
				if (port == last_port)
					goto done;

				*port = schedule_in(last,
						    port - execlists->inflight);
				port++;
			}

			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			submit = true;
			last = rq;
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}
done:
	execlists->queue_priority_hint =
		rb ? to_priolist(rb)->priority : INT_MIN;
	if (submit) {
		*port = schedule_in(last, port - execlists->inflight);
		*++port = NULL;
		guc_submit(engine, first, port);
	}
	execlists->active = execlists->inflight;
}

static void guc_submission_tasklet(unsigned long data)
{
	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request **port, *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	for (port = execlists->inflight; (rq = *port); port++) {
		if (!i915_request_completed(rq))
			break;

		schedule_out(rq);
	}
	if (port != execlists->inflight) {
		int idx = port - execlists->inflight;
		int rem = ARRAY_SIZE(execlists->inflight) - idx;

		memmove(execlists->inflight, port, rem * sizeof(*port));
	}

	__guc_dequeue(engine);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	__tasklet_disable_sync_once(&execlists->tasklet);
}

static void
cancel_port_requests(struct intel_engine_execlists * const execlists)
{
	struct i915_request * const *port, *rq;

	/* Note we are only using the inflight and not the pending queue */

	for (port = execlists->active; (rq = *port); port++)
		schedule_out(rq);
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void guc_reset(struct intel_engine_cs *engine, bool stalled)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	cancel_port_requests(execlists);

	/* Push back any incomplete requests for replay after the reset. */
	rq = execlists_unwind_incomplete_requests(execlists);
	if (!rq)
		goto out_unlock;

	if (!i915_request_started(rq))
		stalled = false;

	__i915_request_reset(rq, stalled);
	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);

out_unlock:
	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_cancel_requests(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_request *rq, *rn;
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	/*
	 * Before we call engine->cancel_requests(), we should have exclusive
	 * access to the submission state. This is arranged for us by the
	 * caller disabling the interrupt generation, the tasklet and other
	 * threads that may then access the same state, giving us a free hand
	 * to reset state. However, we still need to let lockdep be aware that
	 * we know this state may be accessed in hardirq context, so we
	 * disable the irq around this manipulation and we want to keep
	 * the spinlock focused on its duties and not accidentally conflate
	 * coverage to the submission's irq state. (Similarly, although we
	 * shouldn't need to disable irq around the manipulation of the
	 * submission's irq state, we also wish to remind ourselves that
	 * it is irq state.)
	 */
	spin_lock_irqsave(&engine->active.lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */
	cancel_port_requests(execlists);

	/* Mark all executing requests as skipped. */
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		i915_request_mark_complete(rq);
	}

	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
			i915_request_mark_complete(rq);
		}

		rb_erase_cached(&p->node, &execlists->queue);
		i915_priolist_free(p);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void guc_reset_finish(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	if (__tasklet_enable(&execlists->tasklet))
		/* And kick in case we missed a new request submission. */
		tasklet_hi_schedule(&execlists->tasklet);

	GEM_TRACE("%s: depth->%d\n", engine->name,
		  atomic_read(&execlists->tasklet.count));
}

/*
 * Everything below here is concerned with setup & teardown, and is
 * therefore not part of the somewhat time-critical batch-submission
 * path of guc_submit() above.
 */

/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
	bool valid;

	GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);

	valid = __doorbell_valid(guc, db_id);

	if (test_bit(db_id, guc->doorbell_bitmap) == valid)
		return true;

	DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
			 db_id, yesno(valid));

	return false;
}

static bool guc_verify_doorbells(struct intel_guc *guc)
{
	bool doorbells_ok = true;
	u16 db_id;

	for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
		if (!doorbell_ok(guc, db_id))
			doorbells_ok = false;

	return doorbells_ok;
}

/**
 * guc_client_alloc() - Allocate an intel_guc_client
 * @guc: the intel_guc structure
 * @priority: one of the four GuC priority levels: _CRITICAL, _HIGH, _NORMAL
 *	and _LOW. The kernel client that replaces execlist submission is
 *	created with NORMAL priority. The priority of a scheduler client can
 *	be HIGH, while a preemption context would use CRITICAL.
 *
 * Return: An intel_guc_client object on success, an ERR_PTR on failure.
 */
static struct intel_guc_client *
guc_client_alloc(struct intel_guc *guc, u32 priority)
{
	struct intel_guc_client *client;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->guc = guc;
	client->priority = priority;
	client->doorbell_id = GUC_DOORBELL_INVALID;
	spin_lock_init(&client->wq_lock);

	ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
			     GFP_KERNEL);
	if (ret < 0)
		goto err_client;

	client->stage_id = ret;

	/* The first page is doorbell/proc_desc. The following two pages are wq. */
	vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_id;
	}

	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
	client->vma = vma;

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_vma;
	}
	client->vaddr = vaddr;

	ret = reserve_doorbell(client);
	if (ret)
		goto err_vaddr;

	client->doorbell_offset = __select_cacheline(guc);

	/*
	 * Since the doorbell only requires a single cacheline, we can save
	 * space by putting the application process descriptor in the same
	 * page. Use the half of the page that doesn't include the doorbell.
	 */
	if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
		client->proc_desc_offset = 0;
	else
		client->proc_desc_offset = (GUC_DB_SIZE / 2);

	DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
			 priority, client, client->stage_id);
	DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
			 client->doorbell_id, client->doorbell_offset);

	return client;

err_vaddr:
	i915_gem_object_unpin_map(client->vma->obj);
err_vma:
	i915_vma_unpin_and_release(&client->vma, 0);
err_id:
	ida_simple_remove(&guc->stage_ids, client->stage_id);
err_client:
	kfree(client);
	return ERR_PTR(ret);
}

static void guc_client_free(struct intel_guc_client *client)
{
	unreserve_doorbell(client);
	i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
	ida_simple_remove(&client->guc->stage_ids, client->stage_id);
	kfree(client);
}

static inline bool ctx_save_restore_disabled(struct intel_context *ce)
{
	u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];

#define SR_DISABLED \
	_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
			   CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)

	return (sr & SR_DISABLED) == SR_DISABLED;

#undef SR_DISABLED
}

static int guc_clients_create(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	GEM_BUG_ON(guc->execbuf_client);

	client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
	if (IS_ERR(client)) {
		DRM_ERROR("Failed to create GuC client for submission!\n");
		return PTR_ERR(client);
	}
	guc->execbuf_client = client;

	return 0;
}

static void guc_clients_destroy(struct intel_guc *guc)
{
	struct intel_guc_client *client;

	client = fetch_and_zero(&guc->execbuf_client);
	if (client)
		guc_client_free(client);
}
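
/*
 * Enabling a client initialises its shared process and stage descriptors
 * before the doorbell allocation request is sent to the firmware;
 * __guc_client_disable() tears the same state down in the reverse order.
 */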
static int __guc_client_enable(struct intel_guc_client *client)
{
	int ret;

	guc_proc_desc_init(client);
	guc_stage_desc_init(client);

	ret = create_doorbell(client);
	if (ret)
		goto fail;

	return 0;

fail:
	guc_stage_desc_fini(client);
	guc_proc_desc_fini(client);
	return ret;
}

static void __guc_client_disable(struct intel_guc_client *client)
{
	/*
	 * By the time we're here, GuC may have already been reset. If that is
	 * the case, instead of trying (in vain) to communicate with it, let's
	 * just cleanup the doorbell HW and our internal state.
	 */
	if (intel_guc_is_running(client->guc))
		destroy_doorbell(client);
	else
		__fini_doorbell(client);

	guc_stage_desc_fini(client);
	guc_proc_desc_fini(client);
}

static int guc_clients_enable(struct intel_guc *guc)
{
	return __guc_client_enable(guc->execbuf_client);
}

static void guc_clients_disable(struct intel_guc *guc)
{
	if (guc->execbuf_client)
		__guc_client_disable(guc->execbuf_client);
}

/*
 * Set up the memory resources to be shared with the GuC (via the GGTT)
 * at firmware loading time.
 */
int intel_guc_submission_init(struct intel_guc *guc)
{
	int ret;

	if (guc->stage_desc_pool)
		return 0;

	ret = guc_stage_desc_pool_create(guc);
	if (ret)
		return ret;
	/*
	 * Keep static analysers happy, let them know that we allocated the
	 * vma after testing that it didn't exist earlier.
	 */
	GEM_BUG_ON(!guc->stage_desc_pool);

	WARN_ON(!guc_verify_doorbells(guc));
	ret = guc_clients_create(guc);
	if (ret)
		goto err_pool;

	return 0;

err_pool:
	guc_stage_desc_pool_destroy(guc);
	return ret;
}

void intel_guc_submission_fini(struct intel_guc *guc)
{
	guc_clients_destroy(guc);
	WARN_ON(!guc_verify_doorbells(guc));

	if (guc->stage_desc_pool)
		guc_stage_desc_pool_destroy(guc);
}
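
/*
 * guc_interrupts_capture() and guc_interrupts_release() below are paired:
 * intel_guc_submission_enable() steers engine interrupts towards the GuC,
 * and intel_guc_submission_disable() routes them back to the host.
 */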
static void guc_interrupts_capture(struct intel_gt *gt)
{
	struct intel_rps *rps = &gt->rps;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/* tell all command streamers to forward interrupts (but not vblank)
	 * to GuC
	 */
	irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, gt, id)
		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);

	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
	       GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
	/* These three registers have the same bit definitions */
	intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
	intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);

	/*
	 * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
	 * (unmasked) PM interrupts to the GuC. All other bits of this
	 * register *disable* generation of a specific interrupt.
	 *
	 * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
	 * writing to the PM interrupt mask register, i.e. interrupts
	 * that must not be disabled.
	 *
	 * If the GuC is handling these interrupts, then we must not let
	 * the PM code disable ANY interrupt that the GuC is expecting.
	 * So for each ENABLED (0) bit in this register, we must SET the
	 * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
	 * GuC needs the ARAT expired interrupt unmasked, hence it is set
	 * in pm_intrmsk_mbz.
	 *
	 * Here we CLEAR the REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which
	 * will result in the register bit being left SET!
	 */
	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}

static void guc_interrupts_release(struct intel_gt *gt)
{
	struct intel_rps *rps = &gt->rps;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int irqs;

	/*
	 * tell all command streamers NOT to forward interrupts or vblank
	 * to GuC.
	 */
	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
	for_each_engine(engine, gt, id)
		ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);

	/* route all GT interrupts to the host */
	intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
	intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
	intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);

	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
}

static void guc_set_default_submission(struct intel_engine_cs *engine)
{
	/*
	 * We inherit a bunch of functions from execlists that we'd like
	 * to keep using:
	 *
	 *    engine->submit_request = execlists_submit_request;
	 *    engine->cancel_requests = execlists_cancel_requests;
	 *    engine->schedule = execlists_schedule;
	 *
	 * But we need to override the actual submission backend in order
	 * to talk to the GuC.
	 */
	intel_execlists_set_default_submission(engine);

	engine->execlists.tasklet.func = guc_submission_tasklet;

	/* do not use execlists park/unpark */
	engine->park = engine->unpark = NULL;

	engine->reset.prepare = guc_reset_prepare;
	engine->reset.reset = guc_reset;
	engine->reset.finish = guc_reset_finish;

	engine->cancel_requests = guc_cancel_requests;

	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
	engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;

	/*
	 * For the breadcrumb irq to work we need the interrupts to stay
	 * enabled. However, on all platforms on which we'll have support for
	 * GuC submission we don't allow disabling the interrupts at runtime,
	 * so we're always safe with the current flow.
	 */
	GEM_BUG_ON(engine->irq_enable || engine->irq_disable);
}

int intel_guc_submission_enable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	/*
	 * We're using GuC work items for submitting work through GuC. Since
	 * we're coalescing multiple requests from a single context into a
	 * single work item prior to assigning it to execlist_port, we can
	 * never have more work items than the total number of ports (for all
	 * engines). The GuC firmware is controlling the HEAD of work queue,
	 * and it is guaranteed that it will remove the work item from the
	 * queue before our request is completed.
	 */
	BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
		     sizeof(struct guc_wq_item) *
		     I915_NUM_ENGINES > GUC_WQ_SIZE);

	GEM_BUG_ON(!guc->execbuf_client);

	err = guc_clients_enable(guc);
	if (err)
		return err;

	/* Take over from manual control of ELSP (execlists) */
	guc_interrupts_capture(gt);

	for_each_engine(engine, gt, id) {
		engine->set_default_submission = guc_set_default_submission;
		engine->set_default_submission(engine);
	}

	return 0;
}

void intel_guc_submission_disable(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	GEM_BUG_ON(gt->awake); /* GT should be parked first */

	guc_interrupts_release(gt);
	guc_clients_disable(guc);
}

static bool __guc_submission_support(struct intel_guc *guc)
{
	/* XXX: GuC submission is unavailable for now */
	return false;

	if (!intel_guc_is_supported(guc))
		return false;

	return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}

void intel_guc_submission_init_early(struct intel_guc *guc)
{
	guc->submission_supported = __guc_submission_support(guc);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_guc.c"
#endif