/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc.)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated.
 * The local default context for each opened fd is more complex, because we
 * don't know at creation time which engine is going to use them. To handle
 * this, we have implemented a deferred creation of LR contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but is instead kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one.
 * This request will then be resubmitted along with a new request for a
 * different context, which will cause the hardware to continue executing the
 * second request and queue the new request (the GPU detects the condition of
 * a context getting preempted with the same context and optimizes the context
 * switch flow by not doing preemption, but just sampling the new tail pointer).
 *
 */
#include <linux/interrupt.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
#include "intel_workarounds.h"

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define GEN8_CTX_STATUS_COMPLETED_MASK \
	(GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)

#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)

struct virtual_engine {
	struct intel_engine_cs base;
	struct intel_context context;

	/*
	 * We allow only a single request through the virtual engine at a time
	 * (each request in the timeline waits for the completion fence of
	 * the previous before being submitted). By restricting ourselves to
	 * only submitting a single request, each request is placed on to a
	 * physical engine to maximise load spreading (by virtue of the late
	 * greedy scheduling -- each real engine takes the next available
	 * request upon idling).
	 */
	struct i915_request *request;

	/*
	 * We keep a rbtree of available virtual engines inside each physical
	 * engine, sorted by priority. Here we preallocate the nodes we need
	 * for the virtual engine, indexed by physical_engine->id.
	 */
	struct ve_node {
		struct rb_node rb;
		int prio;
	} nodes[I915_NUM_ENGINES];

	/*
	 * Keep track of bonded pairs -- restrictions upon our selection of
	 * physical engines any particular request may be submitted to.
	 * If we receive a submit-fence from a master engine, we will only
	 * use one of sibling_mask physical engines.
	 */
	struct ve_bond {
		const struct intel_engine_cs *master;
		intel_engine_mask_t sibling_mask;
	} *bonds;
	unsigned int num_bonds;

	/* And finally, which physical engines this virtual engine maps onto.
*/ 209 unsigned int num_siblings; 210 struct intel_engine_cs *siblings[0]; 211 }; 212 213 static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) 214 { 215 GEM_BUG_ON(!intel_engine_is_virtual(engine)); 216 return container_of(engine, struct virtual_engine, base); 217 } 218 219 static int execlists_context_deferred_alloc(struct intel_context *ce, 220 struct intel_engine_cs *engine); 221 static void execlists_init_reg_state(u32 *reg_state, 222 struct intel_context *ce, 223 struct intel_engine_cs *engine, 224 struct intel_ring *ring); 225 226 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) 227 { 228 return (i915_ggtt_offset(engine->status_page.vma) + 229 I915_GEM_HWS_PREEMPT_ADDR); 230 } 231 232 static inline void 233 ring_set_paused(const struct intel_engine_cs *engine, int state) 234 { 235 /* 236 * We inspect HWS_PREEMPT with a semaphore inside 237 * engine->emit_fini_breadcrumb. If the dword is true, 238 * the ring is paused as the semaphore will busywait 239 * until the dword is false. 240 */ 241 engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; 242 if (state) 243 wmb(); 244 } 245 246 static inline struct i915_priolist *to_priolist(struct rb_node *rb) 247 { 248 return rb_entry(rb, struct i915_priolist, node); 249 } 250 251 static inline int rq_prio(const struct i915_request *rq) 252 { 253 return rq->sched.attr.priority; 254 } 255 256 static int effective_prio(const struct i915_request *rq) 257 { 258 int prio = rq_prio(rq); 259 260 /* 261 * If this request is special and must not be interrupted at any 262 * cost, so be it. Note we are only checking the most recent request 263 * in the context and so may be masking an earlier vip request. It 264 * is hoped that under the conditions where nopreempt is used, this 265 * will not matter (i.e. all requests to that context will be 266 * nopreempt for as long as desired). 267 */ 268 if (i915_request_has_nopreempt(rq)) 269 prio = I915_PRIORITY_UNPREEMPTABLE; 270 271 /* 272 * On unwinding the active request, we give it a priority bump 273 * if it has completed waiting on any semaphore. If we know that 274 * the request has already started, we can prevent an unwanted 275 * preempt-to-idle cycle by taking that into account now. 276 */ 277 if (__i915_request_has_started(rq)) 278 prio |= I915_PRIORITY_NOSEMAPHORE; 279 280 /* Restrict mere WAIT boosts from triggering preemption */ 281 BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */ 282 return prio | __NO_PREEMPTION; 283 } 284 285 static int queue_prio(const struct intel_engine_execlists *execlists) 286 { 287 struct i915_priolist *p; 288 struct rb_node *rb; 289 290 rb = rb_first_cached(&execlists->queue); 291 if (!rb) 292 return INT_MIN; 293 294 /* 295 * As the priolist[] are inverted, with the highest priority in [0], 296 * we have to flip the index value to become priority. 297 */ 298 p = to_priolist(rb); 299 return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); 300 } 301 302 static inline bool need_preempt(const struct intel_engine_cs *engine, 303 const struct i915_request *rq, 304 struct rb_node *rb) 305 { 306 int last_prio; 307 308 if (!intel_engine_has_semaphores(engine)) 309 return false; 310 311 /* 312 * Check if the current priority hint merits a preemption attempt. 
	 *
	 * We record the highest value priority we saw during rescheduling
	 * prior to this dequeue, therefore we know that if it is strictly
	 * less than the current tail of ELSP[0], we do not need to force
	 * a preempt-to-idle cycle.
	 *
	 * However, the priority hint is a mere hint that we may need to
	 * preempt. If that hint is stale or we may be trying to preempt
	 * ourselves, ignore the request.
	 */
	last_prio = effective_prio(rq);
	if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint,
					 last_prio))
		return false;

	/*
	 * Check against the first request in ELSP[1], it will, thanks to the
	 * power of PI, be the highest priority of that context.
	 */
	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
		return true;

	if (rb) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		bool preempt = false;

		if (engine == ve->siblings[0]) { /* only preempt one sibling */
			struct i915_request *next;

			rcu_read_lock();
			next = READ_ONCE(ve->request);
			if (next)
				preempt = rq_prio(next) > last_prio;
			rcu_read_unlock();
		}

		if (preempt)
			return preempt;
	}

	/*
	 * If the inflight context did not trigger the preemption, then maybe
	 * it was the set of queued requests? Pick the highest priority in
	 * the queue (the first active priolist) and see if it deserves to be
	 * running instead of ELSP[0].
	 *
	 * The highest priority request in the queue cannot be either
	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
	 * context, its priority would not exceed ELSP[0] aka last_prio.
	 */
	return queue_prio(&engine->execlists) > last_prio;
}

__maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
		      const struct i915_request *next)
{
	/*
	 * Without preemption, the prev may refer to the still active element
	 * which we refuse to let go.
	 *
	 * Even with preemption, there are times when we think it is better not
	 * to preempt and leave an ostensibly lower priority request in flight.
	 */
	if (i915_request_is_active(prev))
		return true;

	return rq_prio(prev) >= rq_prio(next);
}

/*
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *	bits  0-11:	flags, GEN8_CTX_* (cached in ctx->desc_template)
 *	bits 12-31:	LRCA, GTT address of (the HWSP of) this context
 *	bits 32-52:	ctx ID, a globally unique tag (highest bit used by GuC)
 *	bits 53-54:	mbz, reserved for use by hardware
 *	bits 55-63:	group ID, currently unused and set to 0
 *
 * Starting from Gen11, the upper dword of the descriptor has a new format:
 *
 *	bits 32-36:	reserved
 *	bits 37-47:	SW context ID
 *	bits 48-53:	engine instance
 *	bit  54:	mbz, reserved for use by hardware
 *	bits 55-60:	SW counter
 *	bits 61-63:	engine class
 *
 * engine info, SW context ID and SW counter need to form a unique number
 * (Context ID) per lrc.
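 *
 * A purely illustrative example with made-up values: on Gen8, a context
 * whose register state starts at GGTT offset 0x00801000 (that is,
 * i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE), with
 * hw_id 5 and a desc_template of 0x19, would be described as
 *
 *	0x19 | 0x00801000 | ((u64)5 << GEN8_CTX_ID_SHIFT) = 0x0000000500801019
 *
 * which is what lrc_descriptor() below assembles.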
410 */ 411 static u64 412 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) 413 { 414 struct i915_gem_context *ctx = ce->gem_context; 415 u64 desc; 416 417 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); 418 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH))); 419 420 desc = ctx->desc_template; /* bits 0-11 */ 421 GEM_BUG_ON(desc & GENMASK_ULL(63, 12)); 422 423 desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; 424 /* bits 12-31 */ 425 GEM_BUG_ON(desc & GENMASK_ULL(63, 32)); 426 427 /* 428 * The following 32bits are copied into the OA reports (dword 2). 429 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing 430 * anything below. 431 */ 432 if (INTEL_GEN(engine->i915) >= 11) { 433 GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); 434 desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; 435 /* bits 37-47 */ 436 437 desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; 438 /* bits 48-53 */ 439 440 /* TODO: decide what to do with SW counter (bits 55-60) */ 441 442 desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; 443 /* bits 61-63 */ 444 } else { 445 GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH)); 446 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 447 } 448 449 return desc; 450 } 451 452 static void unwind_wa_tail(struct i915_request *rq) 453 { 454 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); 455 assert_ring_tail_valid(rq->ring, rq->tail); 456 } 457 458 static struct i915_request * 459 __unwind_incomplete_requests(struct intel_engine_cs *engine) 460 { 461 struct i915_request *rq, *rn, *active = NULL; 462 struct list_head *uninitialized_var(pl); 463 int prio = I915_PRIORITY_INVALID; 464 465 lockdep_assert_held(&engine->active.lock); 466 467 list_for_each_entry_safe_reverse(rq, rn, 468 &engine->active.requests, 469 sched.link) { 470 struct intel_engine_cs *owner; 471 472 if (i915_request_completed(rq)) 473 continue; /* XXX */ 474 475 __i915_request_unsubmit(rq); 476 unwind_wa_tail(rq); 477 478 /* 479 * Push the request back into the queue for later resubmission. 480 * If this request is not native to this physical engine (i.e. 481 * it came from a virtual source), push it back onto the virtual 482 * engine so that it can be moved across onto another physical 483 * engine as load dictates. 484 */ 485 owner = rq->hw_context->engine; 486 if (likely(owner == engine)) { 487 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 488 if (rq_prio(rq) != prio) { 489 prio = rq_prio(rq); 490 pl = i915_sched_lookup_priolist(engine, prio); 491 } 492 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 493 494 list_move(&rq->sched.link, pl); 495 active = rq; 496 } else { 497 /* 498 * Decouple the virtual breadcrumb before moving it 499 * back to the virtual engine -- we don't want the 500 * request to complete in the background and try 501 * and cancel the breadcrumb on the virtual engine 502 * (instead of the old engine where it is linked)! 
503 */ 504 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, 505 &rq->fence.flags)) { 506 spin_lock(&rq->lock); 507 i915_request_cancel_breadcrumb(rq); 508 spin_unlock(&rq->lock); 509 } 510 rq->engine = owner; 511 owner->submit_request(rq); 512 active = NULL; 513 } 514 } 515 516 return active; 517 } 518 519 struct i915_request * 520 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) 521 { 522 struct intel_engine_cs *engine = 523 container_of(execlists, typeof(*engine), execlists); 524 525 return __unwind_incomplete_requests(engine); 526 } 527 528 static inline void 529 execlists_context_status_change(struct i915_request *rq, unsigned long status) 530 { 531 /* 532 * Only used when GVT-g is enabled now. When GVT-g is disabled, 533 * The compiler should eliminate this function as dead-code. 534 */ 535 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 536 return; 537 538 atomic_notifier_call_chain(&rq->engine->context_status_notifier, 539 status, rq); 540 } 541 542 static inline struct i915_request * 543 execlists_schedule_in(struct i915_request *rq, int idx) 544 { 545 struct intel_context *ce = rq->hw_context; 546 int count; 547 548 trace_i915_request_in(rq, idx); 549 550 count = intel_context_inflight_count(ce); 551 if (!count) { 552 intel_context_get(ce); 553 ce->inflight = rq->engine; 554 555 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 556 intel_engine_context_in(ce->inflight); 557 } 558 559 intel_context_inflight_inc(ce); 560 GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); 561 562 return i915_request_get(rq); 563 } 564 565 static void kick_siblings(struct i915_request *rq, struct intel_context *ce) 566 { 567 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 568 struct i915_request *next = READ_ONCE(ve->request); 569 570 if (next && next->execution_mask & ~rq->execution_mask) 571 tasklet_schedule(&ve->base.execlists.tasklet); 572 } 573 574 static inline void 575 execlists_schedule_out(struct i915_request *rq) 576 { 577 struct intel_context *ce = rq->hw_context; 578 579 GEM_BUG_ON(!intel_context_inflight_count(ce)); 580 581 trace_i915_request_out(rq); 582 583 intel_context_inflight_dec(ce); 584 if (!intel_context_inflight_count(ce)) { 585 intel_engine_context_out(ce->inflight); 586 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); 587 588 /* 589 * If this is part of a virtual engine, its next request may 590 * have been blocked waiting for access to the active context. 591 * We have to kick all the siblings again in case we need to 592 * switch (e.g. the next request is not runnable on this 593 * engine). Hopefully, we will already have submitted the next 594 * request before the tasklet runs and do not need to rebuild 595 * each virtual tree and kick everyone again. 596 */ 597 ce->inflight = NULL; 598 if (rq->engine != ce->engine) 599 kick_siblings(rq, ce); 600 601 intel_context_put(ce); 602 } 603 604 i915_request_put(rq); 605 } 606 607 static u64 execlists_update_context(const struct i915_request *rq) 608 { 609 struct intel_context *ce = rq->hw_context; 610 u64 desc; 611 612 ce->lrc_reg_state[CTX_RING_TAIL + 1] = 613 intel_ring_set_tail(rq->ring, rq->tail); 614 615 /* 616 * Make sure the context image is complete before we submit it to HW. 617 * 618 * Ostensibly, writes (including the WCB) should be flushed prior to 619 * an uncached write such as our mmio register access, the empirical 620 * evidence (esp. 
on Braswell) suggests that the WC write into memory 621 * may not be visible to the HW prior to the completion of the UC 622 * register write and that we may begin execution from the context 623 * before its image is complete leading to invalid PD chasing. 624 * 625 * Furthermore, Braswell, at least, wants a full mb to be sure that 626 * the writes are coherent in memory (visible to the GPU) prior to 627 * execution, and not just visible to other CPUs (as is the result of 628 * wmb). 629 */ 630 mb(); 631 632 desc = ce->lrc_desc; 633 ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; 634 635 return desc; 636 } 637 638 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) 639 { 640 if (execlists->ctrl_reg) { 641 writel(lower_32_bits(desc), execlists->submit_reg + port * 2); 642 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); 643 } else { 644 writel(upper_32_bits(desc), execlists->submit_reg); 645 writel(lower_32_bits(desc), execlists->submit_reg); 646 } 647 } 648 649 static __maybe_unused void 650 trace_ports(const struct intel_engine_execlists *execlists, 651 const char *msg, 652 struct i915_request * const *ports) 653 { 654 const struct intel_engine_cs *engine = 655 container_of(execlists, typeof(*engine), execlists); 656 657 GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", 658 engine->name, msg, 659 ports[0]->fence.context, 660 ports[0]->fence.seqno, 661 i915_request_completed(ports[0]) ? "!" : 662 i915_request_started(ports[0]) ? "*" : 663 "", 664 ports[1] ? ports[1]->fence.context : 0, 665 ports[1] ? ports[1]->fence.seqno : 0); 666 } 667 668 static __maybe_unused bool 669 assert_pending_valid(const struct intel_engine_execlists *execlists, 670 const char *msg) 671 { 672 struct i915_request * const *port, *rq; 673 struct intel_context *ce = NULL; 674 675 trace_ports(execlists, msg, execlists->pending); 676 677 if (execlists->pending[execlists_num_ports(execlists)]) 678 return false; 679 680 for (port = execlists->pending; (rq = *port); port++) { 681 if (ce == rq->hw_context) 682 return false; 683 684 ce = rq->hw_context; 685 if (i915_request_completed(rq)) 686 continue; 687 688 if (i915_active_is_idle(&ce->active)) 689 return false; 690 691 if (!i915_vma_is_pinned(ce->state)) 692 return false; 693 } 694 695 return ce; 696 } 697 698 static void execlists_submit_ports(struct intel_engine_cs *engine) 699 { 700 struct intel_engine_execlists *execlists = &engine->execlists; 701 unsigned int n; 702 703 GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); 704 705 /* 706 * We can skip acquiring intel_runtime_pm_get() here as it was taken 707 * on our behalf by the request (see i915_gem_mark_busy()) and it will 708 * not be relinquished until the device is idle (see 709 * i915_gem_idle_work_handler()). As a precaution, we make sure 710 * that all ELSP are drained i.e. we have processed the CSB, 711 * before allowing ourselves to idle and calling intel_runtime_pm_put(). 712 */ 713 GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); 714 715 /* 716 * ELSQ note: the submit queue is not cleared after being submitted 717 * to the HW so we need to make sure we always clean it up. This is 718 * currently ensured by the fact that we always write the same number 719 * of elsq entries, keep this in mind before changing the loop below. 720 */ 721 for (n = execlists_num_ports(execlists); n--; ) { 722 struct i915_request *rq = execlists->pending[n]; 723 724 write_desc(execlists, 725 rq ? 
execlists_update_context(rq) : 0, 726 n); 727 } 728 729 /* we need to manually load the submit queue */ 730 if (execlists->ctrl_reg) 731 writel(EL_CTRL_LOAD, execlists->ctrl_reg); 732 } 733 734 static bool ctx_single_port_submission(const struct intel_context *ce) 735 { 736 return (IS_ENABLED(CONFIG_DRM_I915_GVT) && 737 i915_gem_context_force_single_submission(ce->gem_context)); 738 } 739 740 static bool can_merge_ctx(const struct intel_context *prev, 741 const struct intel_context *next) 742 { 743 if (prev != next) 744 return false; 745 746 if (ctx_single_port_submission(prev)) 747 return false; 748 749 return true; 750 } 751 752 static bool can_merge_rq(const struct i915_request *prev, 753 const struct i915_request *next) 754 { 755 GEM_BUG_ON(prev == next); 756 GEM_BUG_ON(!assert_priority_queue(prev, next)); 757 758 if (!can_merge_ctx(prev->hw_context, next->hw_context)) 759 return false; 760 761 return true; 762 } 763 764 static void virtual_update_register_offsets(u32 *regs, 765 struct intel_engine_cs *engine) 766 { 767 u32 base = engine->mmio_base; 768 769 /* Must match execlists_init_reg_state()! */ 770 771 regs[CTX_CONTEXT_CONTROL] = 772 i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base)); 773 regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base)); 774 regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base)); 775 regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base)); 776 regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base)); 777 778 regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base)); 779 regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base)); 780 regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base)); 781 regs[CTX_SECOND_BB_HEAD_U] = 782 i915_mmio_reg_offset(RING_SBBADDR_UDW(base)); 783 regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base)); 784 regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base)); 785 786 regs[CTX_CTX_TIMESTAMP] = 787 i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base)); 788 regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3)); 789 regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3)); 790 regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2)); 791 regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2)); 792 regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1)); 793 regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1)); 794 regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 795 regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 796 797 if (engine->class == RENDER_CLASS) { 798 regs[CTX_RCS_INDIRECT_CTX] = 799 i915_mmio_reg_offset(RING_INDIRECT_CTX(base)); 800 regs[CTX_RCS_INDIRECT_CTX_OFFSET] = 801 i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base)); 802 regs[CTX_BB_PER_CTX_PTR] = 803 i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base)); 804 805 regs[CTX_R_PWR_CLK_STATE] = 806 i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); 807 } 808 } 809 810 static bool virtual_matches(const struct virtual_engine *ve, 811 const struct i915_request *rq, 812 const struct intel_engine_cs *engine) 813 { 814 const struct intel_engine_cs *inflight; 815 816 if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ 817 return false; 818 819 /* 820 * We track when the HW has completed saving the context image 821 * (i.e. when we have seen the final CS event switching out of 822 * the context) and must not overwrite the context image before 823 * then. 
This restricts us to only using the active engine 824 * while the previous virtualized request is inflight (so 825 * we reuse the register offsets). This is a very small 826 * hystersis on the greedy seelction algorithm. 827 */ 828 inflight = intel_context_inflight(&ve->context); 829 if (inflight && inflight != engine) 830 return false; 831 832 return true; 833 } 834 835 static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, 836 struct intel_engine_cs *engine) 837 { 838 struct intel_engine_cs *old = ve->siblings[0]; 839 840 /* All unattached (rq->engine == old) must already be completed */ 841 842 spin_lock(&old->breadcrumbs.irq_lock); 843 if (!list_empty(&ve->context.signal_link)) { 844 list_move_tail(&ve->context.signal_link, 845 &engine->breadcrumbs.signalers); 846 intel_engine_queue_breadcrumbs(engine); 847 } 848 spin_unlock(&old->breadcrumbs.irq_lock); 849 } 850 851 static struct i915_request * 852 last_active(const struct intel_engine_execlists *execlists) 853 { 854 struct i915_request * const *last = execlists->active; 855 856 while (*last && i915_request_completed(*last)) 857 last++; 858 859 return *last; 860 } 861 862 static void defer_request(struct i915_request *rq, struct list_head * const pl) 863 { 864 LIST_HEAD(list); 865 866 /* 867 * We want to move the interrupted request to the back of 868 * the round-robin list (i.e. its priority level), but 869 * in doing so, we must then move all requests that were in 870 * flight and were waiting for the interrupted request to 871 * be run after it again. 872 */ 873 do { 874 struct i915_dependency *p; 875 876 GEM_BUG_ON(i915_request_is_active(rq)); 877 list_move_tail(&rq->sched.link, pl); 878 879 list_for_each_entry(p, &rq->sched.waiters_list, wait_link) { 880 struct i915_request *w = 881 container_of(p->waiter, typeof(*w), sched); 882 883 /* Leave semaphores spinning on the other engines */ 884 if (w->engine != rq->engine) 885 continue; 886 887 /* No waiter should start before its signaler */ 888 GEM_BUG_ON(i915_request_started(w) && 889 !i915_request_completed(rq)); 890 891 GEM_BUG_ON(i915_request_is_active(w)); 892 if (list_empty(&w->sched.link)) 893 continue; /* Not yet submitted; unready */ 894 895 if (rq_prio(w) < rq_prio(rq)) 896 continue; 897 898 GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); 899 list_move_tail(&w->sched.link, &list); 900 } 901 902 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); 903 } while (rq); 904 } 905 906 static void defer_active(struct intel_engine_cs *engine) 907 { 908 struct i915_request *rq; 909 910 rq = __unwind_incomplete_requests(engine); 911 if (!rq) 912 return; 913 914 defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); 915 } 916 917 static bool 918 need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) 919 { 920 int hint; 921 922 if (!intel_engine_has_semaphores(engine)) 923 return false; 924 925 if (list_is_last(&rq->sched.link, &engine->active.requests)) 926 return false; 927 928 hint = max(rq_prio(list_next_entry(rq, sched.link)), 929 engine->execlists.queue_priority_hint); 930 931 return hint >= effective_prio(rq); 932 } 933 934 static bool 935 enable_timeslice(struct intel_engine_cs *engine) 936 { 937 struct i915_request *last = last_active(&engine->execlists); 938 939 return last && need_timeslice(engine, last); 940 } 941 942 static void record_preemption(struct intel_engine_execlists *execlists) 943 { 944 (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); 945 } 946 947 static void execlists_dequeue(struct intel_engine_cs 
*engine) 948 { 949 struct intel_engine_execlists * const execlists = &engine->execlists; 950 struct i915_request **port = execlists->pending; 951 struct i915_request ** const last_port = port + execlists->port_mask; 952 struct i915_request *last; 953 struct rb_node *rb; 954 bool submit = false; 955 956 /* 957 * Hardware submission is through 2 ports. Conceptually each port 958 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 959 * static for a context, and unique to each, so we only execute 960 * requests belonging to a single context from each ring. RING_HEAD 961 * is maintained by the CS in the context image, it marks the place 962 * where it got up to last time, and through RING_TAIL we tell the CS 963 * where we want to execute up to this time. 964 * 965 * In this list the requests are in order of execution. Consecutive 966 * requests from the same context are adjacent in the ringbuffer. We 967 * can combine these requests into a single RING_TAIL update: 968 * 969 * RING_HEAD...req1...req2 970 * ^- RING_TAIL 971 * since to execute req2 the CS must first execute req1. 972 * 973 * Our goal then is to point each port to the end of a consecutive 974 * sequence of requests as being the most optimal (fewest wake ups 975 * and context switches) submission. 976 */ 977 978 for (rb = rb_first_cached(&execlists->virtual); rb; ) { 979 struct virtual_engine *ve = 980 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 981 struct i915_request *rq = READ_ONCE(ve->request); 982 983 if (!rq) { /* lazily cleanup after another engine handled rq */ 984 rb_erase_cached(rb, &execlists->virtual); 985 RB_CLEAR_NODE(rb); 986 rb = rb_first_cached(&execlists->virtual); 987 continue; 988 } 989 990 if (!virtual_matches(ve, rq, engine)) { 991 rb = rb_next(rb); 992 continue; 993 } 994 995 break; 996 } 997 998 /* 999 * If the queue is higher priority than the last 1000 * request in the currently active context, submit afresh. 1001 * We will resubmit again afterwards in case we need to split 1002 * the active context to interject the preemption request, 1003 * i.e. we will retrigger preemption following the ack in case 1004 * of trouble. 1005 */ 1006 last = last_active(execlists); 1007 if (last) { 1008 if (need_preempt(engine, last, rb)) { 1009 GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n", 1010 engine->name, 1011 last->fence.context, 1012 last->fence.seqno, 1013 last->sched.attr.priority, 1014 execlists->queue_priority_hint); 1015 record_preemption(execlists); 1016 1017 /* 1018 * Don't let the RING_HEAD advance past the breadcrumb 1019 * as we unwind (and until we resubmit) so that we do 1020 * not accidentally tell it to go backwards. 1021 */ 1022 ring_set_paused(engine, 1); 1023 1024 /* 1025 * Note that we have not stopped the GPU at this point, 1026 * so we are unwinding the incomplete requests as they 1027 * remain inflight and so by the time we do complete 1028 * the preemption, some of the unwound requests may 1029 * complete! 1030 */ 1031 __unwind_incomplete_requests(engine); 1032 1033 /* 1034 * If we need to return to the preempted context, we 1035 * need to skip the lite-restore and force it to 1036 * reload the RING_TAIL. Otherwise, the HW has a 1037 * tendency to ignore us rewinding the TAIL to the 1038 * end of an earlier request. 
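			 *
			 * That is the purpose of the CTX_DESC_FORCE_RESTORE
			 * flag (bit 2 of the context descriptor) set below:
			 * the next ELSP write for this context requests a
			 * full restore rather than a lite-restore, and
			 * execlists_update_context() clears the flag again
			 * once the descriptor has been sampled for
			 * submission.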
1039 */ 1040 last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; 1041 last = NULL; 1042 } else if (need_timeslice(engine, last) && 1043 !timer_pending(&engine->execlists.timer)) { 1044 GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n", 1045 engine->name, 1046 last->fence.context, 1047 last->fence.seqno, 1048 last->sched.attr.priority, 1049 execlists->queue_priority_hint); 1050 1051 ring_set_paused(engine, 1); 1052 defer_active(engine); 1053 1054 /* 1055 * Unlike for preemption, if we rewind and continue 1056 * executing the same context as previously active, 1057 * the order of execution will remain the same and 1058 * the tail will only advance. We do not need to 1059 * force a full context restore, as a lite-restore 1060 * is sufficient to resample the monotonic TAIL. 1061 * 1062 * If we switch to any other context, similarly we 1063 * will not rewind TAIL of current context, and 1064 * normal save/restore will preserve state and allow 1065 * us to later continue executing the same request. 1066 */ 1067 last = NULL; 1068 } else { 1069 /* 1070 * Otherwise if we already have a request pending 1071 * for execution after the current one, we can 1072 * just wait until the next CS event before 1073 * queuing more. In either case we will force a 1074 * lite-restore preemption event, but if we wait 1075 * we hopefully coalesce several updates into a single 1076 * submission. 1077 */ 1078 if (!list_is_last(&last->sched.link, 1079 &engine->active.requests)) 1080 return; 1081 1082 /* 1083 * WaIdleLiteRestore:bdw,skl 1084 * Apply the wa NOOPs to prevent 1085 * ring:HEAD == rq:TAIL as we resubmit the 1086 * request. See gen8_emit_fini_breadcrumb() for 1087 * where we prepare the padding after the 1088 * end of the request. 1089 */ 1090 last->tail = last->wa_tail; 1091 } 1092 } 1093 1094 while (rb) { /* XXX virtual is always taking precedence */ 1095 struct virtual_engine *ve = 1096 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 1097 struct i915_request *rq; 1098 1099 spin_lock(&ve->base.active.lock); 1100 1101 rq = ve->request; 1102 if (unlikely(!rq)) { /* lost the race to a sibling */ 1103 spin_unlock(&ve->base.active.lock); 1104 rb_erase_cached(rb, &execlists->virtual); 1105 RB_CLEAR_NODE(rb); 1106 rb = rb_first_cached(&execlists->virtual); 1107 continue; 1108 } 1109 1110 GEM_BUG_ON(rq != ve->request); 1111 GEM_BUG_ON(rq->engine != &ve->base); 1112 GEM_BUG_ON(rq->hw_context != &ve->context); 1113 1114 if (rq_prio(rq) >= queue_prio(execlists)) { 1115 if (!virtual_matches(ve, rq, engine)) { 1116 spin_unlock(&ve->base.active.lock); 1117 rb = rb_next(rb); 1118 continue; 1119 } 1120 1121 if (i915_request_completed(rq)) { 1122 ve->request = NULL; 1123 ve->base.execlists.queue_priority_hint = INT_MIN; 1124 rb_erase_cached(rb, &execlists->virtual); 1125 RB_CLEAR_NODE(rb); 1126 1127 rq->engine = engine; 1128 __i915_request_submit(rq); 1129 1130 spin_unlock(&ve->base.active.lock); 1131 1132 rb = rb_first_cached(&execlists->virtual); 1133 continue; 1134 } 1135 1136 if (last && !can_merge_rq(last, rq)) { 1137 spin_unlock(&ve->base.active.lock); 1138 return; /* leave this for another */ 1139 } 1140 1141 GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", 1142 engine->name, 1143 rq->fence.context, 1144 rq->fence.seqno, 1145 i915_request_completed(rq) ? "!" : 1146 i915_request_started(rq) ? 
"*" : 1147 "", 1148 yesno(engine != ve->siblings[0])); 1149 1150 ve->request = NULL; 1151 ve->base.execlists.queue_priority_hint = INT_MIN; 1152 rb_erase_cached(rb, &execlists->virtual); 1153 RB_CLEAR_NODE(rb); 1154 1155 GEM_BUG_ON(!(rq->execution_mask & engine->mask)); 1156 rq->engine = engine; 1157 1158 if (engine != ve->siblings[0]) { 1159 u32 *regs = ve->context.lrc_reg_state; 1160 unsigned int n; 1161 1162 GEM_BUG_ON(READ_ONCE(ve->context.inflight)); 1163 virtual_update_register_offsets(regs, engine); 1164 1165 if (!list_empty(&ve->context.signals)) 1166 virtual_xfer_breadcrumbs(ve, engine); 1167 1168 /* 1169 * Move the bound engine to the top of the list 1170 * for future execution. We then kick this 1171 * tasklet first before checking others, so that 1172 * we preferentially reuse this set of bound 1173 * registers. 1174 */ 1175 for (n = 1; n < ve->num_siblings; n++) { 1176 if (ve->siblings[n] == engine) { 1177 swap(ve->siblings[n], 1178 ve->siblings[0]); 1179 break; 1180 } 1181 } 1182 1183 GEM_BUG_ON(ve->siblings[0] != engine); 1184 } 1185 1186 __i915_request_submit(rq); 1187 if (!i915_request_completed(rq)) { 1188 submit = true; 1189 last = rq; 1190 } 1191 } 1192 1193 spin_unlock(&ve->base.active.lock); 1194 break; 1195 } 1196 1197 while ((rb = rb_first_cached(&execlists->queue))) { 1198 struct i915_priolist *p = to_priolist(rb); 1199 struct i915_request *rq, *rn; 1200 int i; 1201 1202 priolist_for_each_request_consume(rq, rn, p, i) { 1203 if (i915_request_completed(rq)) 1204 goto skip; 1205 1206 /* 1207 * Can we combine this request with the current port? 1208 * It has to be the same context/ringbuffer and not 1209 * have any exceptions (e.g. GVT saying never to 1210 * combine contexts). 1211 * 1212 * If we can combine the requests, we can execute both 1213 * by updating the RING_TAIL to point to the end of the 1214 * second request, and so we never need to tell the 1215 * hardware about the first. 1216 */ 1217 if (last && !can_merge_rq(last, rq)) { 1218 /* 1219 * If we are on the second port and cannot 1220 * combine this request with the last, then we 1221 * are done. 1222 */ 1223 if (port == last_port) 1224 goto done; 1225 1226 /* 1227 * We must not populate both ELSP[] with the 1228 * same LRCA, i.e. we must submit 2 different 1229 * contexts if we submit 2 ELSP. 1230 */ 1231 if (last->hw_context == rq->hw_context) 1232 goto done; 1233 1234 /* 1235 * If GVT overrides us we only ever submit 1236 * port[0], leaving port[1] empty. Note that we 1237 * also have to be careful that we don't queue 1238 * the same context (even though a different 1239 * request) to the second port. 1240 */ 1241 if (ctx_single_port_submission(last->hw_context) || 1242 ctx_single_port_submission(rq->hw_context)) 1243 goto done; 1244 1245 *port = execlists_schedule_in(last, port - execlists->pending); 1246 port++; 1247 } 1248 1249 last = rq; 1250 submit = true; 1251 skip: 1252 __i915_request_submit(rq); 1253 } 1254 1255 rb_erase_cached(&p->node, &execlists->queue); 1256 i915_priolist_free(p); 1257 } 1258 1259 done: 1260 /* 1261 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. 1262 * 1263 * We choose the priority hint such that if we add a request of greater 1264 * priority than this, we kick the submission tasklet to decide on 1265 * the right order of submitting the requests to hardware. We must 1266 * also be prepared to reorder requests as they are in-flight on the 1267 * HW. 
We derive the priority hint then as the first "hole" in 1268 * the HW submission ports and if there are no available slots, 1269 * the priority of the lowest executing request, i.e. last. 1270 * 1271 * When we do receive a higher priority request ready to run from the 1272 * user, see queue_request(), the priority hint is bumped to that 1273 * request triggering preemption on the next dequeue (or subsequent 1274 * interrupt for secondary ports). 1275 */ 1276 execlists->queue_priority_hint = queue_prio(execlists); 1277 GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n", 1278 engine->name, execlists->queue_priority_hint, 1279 yesno(submit)); 1280 1281 if (submit) { 1282 *port = execlists_schedule_in(last, port - execlists->pending); 1283 memset(port + 1, 0, (last_port - port) * sizeof(*port)); 1284 execlists_submit_ports(engine); 1285 } else { 1286 ring_set_paused(engine, 0); 1287 } 1288 } 1289 1290 void 1291 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) 1292 { 1293 struct i915_request * const *port, *rq; 1294 1295 for (port = execlists->pending; (rq = *port); port++) 1296 execlists_schedule_out(rq); 1297 memset(execlists->pending, 0, sizeof(execlists->pending)); 1298 1299 for (port = execlists->active; (rq = *port); port++) 1300 execlists_schedule_out(rq); 1301 execlists->active = 1302 memset(execlists->inflight, 0, sizeof(execlists->inflight)); 1303 } 1304 1305 static inline void 1306 invalidate_csb_entries(const u32 *first, const u32 *last) 1307 { 1308 clflush((void *)first); 1309 clflush((void *)last); 1310 } 1311 1312 static inline bool 1313 reset_in_progress(const struct intel_engine_execlists *execlists) 1314 { 1315 return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); 1316 } 1317 1318 enum csb_step { 1319 CSB_NOP, 1320 CSB_PROMOTE, 1321 CSB_PREEMPT, 1322 CSB_COMPLETE, 1323 }; 1324 1325 static inline enum csb_step 1326 csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) 1327 { 1328 unsigned int status = *csb; 1329 1330 if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) 1331 return CSB_PROMOTE; 1332 1333 if (status & GEN8_CTX_STATUS_PREEMPTED) 1334 return CSB_PREEMPT; 1335 1336 if (*execlists->active) 1337 return CSB_COMPLETE; 1338 1339 return CSB_NOP; 1340 } 1341 1342 static void process_csb(struct intel_engine_cs *engine) 1343 { 1344 struct intel_engine_execlists * const execlists = &engine->execlists; 1345 const u32 * const buf = execlists->csb_status; 1346 const u8 num_entries = execlists->csb_size; 1347 u8 head, tail; 1348 1349 lockdep_assert_held(&engine->active.lock); 1350 GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915)); 1351 1352 /* 1353 * Note that csb_write, csb_status may be either in HWSP or mmio. 1354 * When reading from the csb_write mmio register, we have to be 1355 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is 1356 * the low 4bits. As it happens we know the next 4bits are always 1357 * zero and so we can simply masked off the low u8 of the register 1358 * and treat it identically to reading from the HWSP (without having 1359 * to use explicit shifting and masking, and probably bifurcating 1360 * the code to handle the legacy mmio read). 1361 */ 1362 head = execlists->csb_head; 1363 tail = READ_ONCE(*execlists->csb_write); 1364 GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); 1365 if (unlikely(head == tail)) 1366 return; 1367 1368 /* 1369 * Hopefully paired with a wmb() in HW! 
1370 * 1371 * We must complete the read of the write pointer before any reads 1372 * from the CSB, so that we do not see stale values. Without an rmb 1373 * (lfence) the HW may speculatively perform the CSB[] reads *before* 1374 * we perform the READ_ONCE(*csb_write). 1375 */ 1376 rmb(); 1377 1378 do { 1379 if (++head == num_entries) 1380 head = 0; 1381 1382 /* 1383 * We are flying near dragons again. 1384 * 1385 * We hold a reference to the request in execlist_port[] 1386 * but no more than that. We are operating in softirq 1387 * context and so cannot hold any mutex or sleep. That 1388 * prevents us stopping the requests we are processing 1389 * in port[] from being retired simultaneously (the 1390 * breadcrumb will be complete before we see the 1391 * context-switch). As we only hold the reference to the 1392 * request, any pointer chasing underneath the request 1393 * is subject to a potential use-after-free. Thus we 1394 * store all of the bookkeeping within port[] as 1395 * required, and avoid using unguarded pointers beneath 1396 * request itself. The same applies to the atomic 1397 * status notifier. 1398 */ 1399 1400 GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n", 1401 engine->name, head, 1402 buf[2 * head + 0], buf[2 * head + 1]); 1403 1404 switch (csb_parse(execlists, buf + 2 * head)) { 1405 case CSB_PREEMPT: /* cancel old inflight, prepare for switch */ 1406 trace_ports(execlists, "preempted", execlists->active); 1407 1408 while (*execlists->active) 1409 execlists_schedule_out(*execlists->active++); 1410 1411 /* fallthrough */ 1412 case CSB_PROMOTE: /* switch pending to inflight */ 1413 GEM_BUG_ON(*execlists->active); 1414 GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); 1415 execlists->active = 1416 memcpy(execlists->inflight, 1417 execlists->pending, 1418 execlists_num_ports(execlists) * 1419 sizeof(*execlists->pending)); 1420 execlists->pending[0] = NULL; 1421 1422 trace_ports(execlists, "promoted", execlists->active); 1423 1424 if (enable_timeslice(engine)) 1425 mod_timer(&execlists->timer, jiffies + 1); 1426 1427 if (!inject_preempt_hang(execlists)) 1428 ring_set_paused(engine, 0); 1429 break; 1430 1431 case CSB_COMPLETE: /* port0 completed, advanced to port1 */ 1432 trace_ports(execlists, "completed", execlists->active); 1433 1434 /* 1435 * We rely on the hardware being strongly 1436 * ordered, that the breadcrumb write is 1437 * coherent (visible from the CPU) before the 1438 * user interrupt and CSB is processed. 1439 */ 1440 GEM_BUG_ON(!i915_request_completed(*execlists->active) && 1441 !reset_in_progress(execlists)); 1442 execlists_schedule_out(*execlists->active++); 1443 1444 GEM_BUG_ON(execlists->active - execlists->inflight > 1445 execlists_num_ports(execlists)); 1446 break; 1447 1448 case CSB_NOP: 1449 break; 1450 } 1451 } while (head != tail); 1452 1453 execlists->csb_head = head; 1454 1455 /* 1456 * Gen11 has proven to fail wrt global observation point between 1457 * entry and tail update, failing on the ordering and thus 1458 * we see an old entry in the context status buffer. 1459 * 1460 * Forcibly evict out entries for the next gpu csb update, 1461 * to increase the odds that we get a fresh entries with non 1462 * working hardware. The cost for doing so comes out mostly with 1463 * the wash as hardware, working or not, will need to do the 1464 * invalidation before. 
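	 *
	 * invalidate_csb_entries() below does this by clflushing the
	 * cachelines holding the first and the last CSB entries.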
1465 */ 1466 invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); 1467 } 1468 1469 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) 1470 { 1471 lockdep_assert_held(&engine->active.lock); 1472 1473 process_csb(engine); 1474 if (!engine->execlists.pending[0]) 1475 execlists_dequeue(engine); 1476 } 1477 1478 /* 1479 * Check the unread Context Status Buffers and manage the submission of new 1480 * contexts to the ELSP accordingly. 1481 */ 1482 static void execlists_submission_tasklet(unsigned long data) 1483 { 1484 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 1485 unsigned long flags; 1486 1487 spin_lock_irqsave(&engine->active.lock, flags); 1488 __execlists_submission_tasklet(engine); 1489 spin_unlock_irqrestore(&engine->active.lock, flags); 1490 } 1491 1492 static void execlists_submission_timer(struct timer_list *timer) 1493 { 1494 struct intel_engine_cs *engine = 1495 from_timer(engine, timer, execlists.timer); 1496 1497 /* Kick the tasklet for some interrupt coalescing and reset handling */ 1498 tasklet_hi_schedule(&engine->execlists.tasklet); 1499 } 1500 1501 static void queue_request(struct intel_engine_cs *engine, 1502 struct i915_sched_node *node, 1503 int prio) 1504 { 1505 GEM_BUG_ON(!list_empty(&node->link)); 1506 list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); 1507 } 1508 1509 static void __submit_queue_imm(struct intel_engine_cs *engine) 1510 { 1511 struct intel_engine_execlists * const execlists = &engine->execlists; 1512 1513 if (reset_in_progress(execlists)) 1514 return; /* defer until we restart the engine following reset */ 1515 1516 if (execlists->tasklet.func == execlists_submission_tasklet) 1517 __execlists_submission_tasklet(engine); 1518 else 1519 tasklet_hi_schedule(&execlists->tasklet); 1520 } 1521 1522 static void submit_queue(struct intel_engine_cs *engine, 1523 const struct i915_request *rq) 1524 { 1525 struct intel_engine_execlists *execlists = &engine->execlists; 1526 1527 if (rq_prio(rq) <= execlists->queue_priority_hint) 1528 return; 1529 1530 execlists->queue_priority_hint = rq_prio(rq); 1531 __submit_queue_imm(engine); 1532 } 1533 1534 static void execlists_submit_request(struct i915_request *request) 1535 { 1536 struct intel_engine_cs *engine = request->engine; 1537 unsigned long flags; 1538 1539 /* Will be called from irq-context when using foreign fences. 
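	 * Hence the irqsave/irqrestore variants around engine->active.lock.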
*/ 1540 spin_lock_irqsave(&engine->active.lock, flags); 1541 1542 queue_request(engine, &request->sched, rq_prio(request)); 1543 1544 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 1545 GEM_BUG_ON(list_empty(&request->sched.link)); 1546 1547 submit_queue(engine, request); 1548 1549 spin_unlock_irqrestore(&engine->active.lock, flags); 1550 } 1551 1552 static void __execlists_context_fini(struct intel_context *ce) 1553 { 1554 intel_ring_put(ce->ring); 1555 i915_vma_put(ce->state); 1556 } 1557 1558 static void execlists_context_destroy(struct kref *kref) 1559 { 1560 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 1561 1562 GEM_BUG_ON(!i915_active_is_idle(&ce->active)); 1563 GEM_BUG_ON(intel_context_is_pinned(ce)); 1564 1565 if (ce->state) 1566 __execlists_context_fini(ce); 1567 1568 intel_context_fini(ce); 1569 intel_context_free(ce); 1570 } 1571 1572 static void execlists_context_unpin(struct intel_context *ce) 1573 { 1574 i915_gem_context_unpin_hw_id(ce->gem_context); 1575 i915_gem_object_unpin_map(ce->state->obj); 1576 } 1577 1578 static void 1579 __execlists_update_reg_state(struct intel_context *ce, 1580 struct intel_engine_cs *engine) 1581 { 1582 struct intel_ring *ring = ce->ring; 1583 u32 *regs = ce->lrc_reg_state; 1584 1585 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); 1586 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); 1587 1588 regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma); 1589 regs[CTX_RING_HEAD + 1] = ring->head; 1590 regs[CTX_RING_TAIL + 1] = ring->tail; 1591 1592 /* RPCS */ 1593 if (engine->class == RENDER_CLASS) { 1594 regs[CTX_R_PWR_CLK_STATE + 1] = 1595 intel_sseu_make_rpcs(engine->i915, &ce->sseu); 1596 1597 i915_oa_init_reg_state(engine, ce, regs); 1598 } 1599 } 1600 1601 static int 1602 __execlists_context_pin(struct intel_context *ce, 1603 struct intel_engine_cs *engine) 1604 { 1605 void *vaddr; 1606 int ret; 1607 1608 ret = execlists_context_deferred_alloc(ce, engine); 1609 if (ret) 1610 goto err; 1611 GEM_BUG_ON(!ce->state); 1612 1613 ret = intel_context_active_acquire(ce); 1614 if (ret) 1615 goto err; 1616 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 1617 1618 vaddr = i915_gem_object_pin_map(ce->state->obj, 1619 i915_coherent_map_type(engine->i915) | 1620 I915_MAP_OVERRIDE); 1621 if (IS_ERR(vaddr)) { 1622 ret = PTR_ERR(vaddr); 1623 goto unpin_active; 1624 } 1625 1626 ret = i915_gem_context_pin_hw_id(ce->gem_context); 1627 if (ret) 1628 goto unpin_map; 1629 1630 ce->lrc_desc = lrc_descriptor(ce, engine); 1631 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 1632 __execlists_update_reg_state(ce, engine); 1633 1634 return 0; 1635 1636 unpin_map: 1637 i915_gem_object_unpin_map(ce->state->obj); 1638 unpin_active: 1639 intel_context_active_release(ce); 1640 err: 1641 return ret; 1642 } 1643 1644 static int execlists_context_pin(struct intel_context *ce) 1645 { 1646 return __execlists_context_pin(ce, ce->engine); 1647 } 1648 1649 static void execlists_context_reset(struct intel_context *ce) 1650 { 1651 /* 1652 * Because we emit WA_TAIL_DWORDS there may be a disparity 1653 * between our bookkeeping in ce->ring->head and ce->ring->tail and 1654 * that stored in context. As we only write new commands from 1655 * ce->ring->tail onwards, everything before that is junk. If the GPU 1656 * starts reading from its RING_HEAD from the context, it may try to 1657 * execute that junk and die. 1658 * 1659 * The contexts that are stilled pinned on resume belong to the 1660 * kernel, and are local to each engine. 
All other contexts will 1661 * have their head/tail sanitized upon pinning before use, so they 1662 * will never see garbage, 1663 * 1664 * So to avoid that we reset the context images upon resume. For 1665 * simplicity, we just zero everything out. 1666 */ 1667 intel_ring_reset(ce->ring, 0); 1668 __execlists_update_reg_state(ce, ce->engine); 1669 } 1670 1671 static const struct intel_context_ops execlists_context_ops = { 1672 .pin = execlists_context_pin, 1673 .unpin = execlists_context_unpin, 1674 1675 .enter = intel_context_enter_engine, 1676 .exit = intel_context_exit_engine, 1677 1678 .reset = execlists_context_reset, 1679 .destroy = execlists_context_destroy, 1680 }; 1681 1682 static int gen8_emit_init_breadcrumb(struct i915_request *rq) 1683 { 1684 u32 *cs; 1685 1686 GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb); 1687 1688 cs = intel_ring_begin(rq, 6); 1689 if (IS_ERR(cs)) 1690 return PTR_ERR(cs); 1691 1692 /* 1693 * Check if we have been preempted before we even get started. 1694 * 1695 * After this point i915_request_started() reports true, even if 1696 * we get preempted and so are no longer running. 1697 */ 1698 *cs++ = MI_ARB_CHECK; 1699 *cs++ = MI_NOOP; 1700 1701 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1702 *cs++ = rq->timeline->hwsp_offset; 1703 *cs++ = 0; 1704 *cs++ = rq->fence.seqno - 1; 1705 1706 intel_ring_advance(rq, cs); 1707 1708 /* Record the updated position of the request's payload */ 1709 rq->infix = intel_ring_offset(rq, cs); 1710 1711 return 0; 1712 } 1713 1714 static int emit_pdps(struct i915_request *rq) 1715 { 1716 const struct intel_engine_cs * const engine = rq->engine; 1717 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm); 1718 int err, i; 1719 u32 *cs; 1720 1721 GEM_BUG_ON(intel_vgpu_active(rq->i915)); 1722 1723 /* 1724 * Beware ye of the dragons, this sequence is magic! 1725 * 1726 * Small changes to this sequence can cause anything from 1727 * GPU hangs to forcewake errors and machine lockups! 1728 */ 1729 1730 /* Flush any residual operations from the context load */ 1731 err = engine->emit_flush(rq, EMIT_FLUSH); 1732 if (err) 1733 return err; 1734 1735 /* Magic required to prevent forcewake errors! 
 */
1736 	err = engine->emit_flush(rq, EMIT_INVALIDATE);
1737 	if (err)
1738 		return err;
1739 
1740 	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1741 	if (IS_ERR(cs))
1742 		return PTR_ERR(cs);
1743 
1744 	/* Ensure the LRI have landed before we invalidate & continue */
1745 	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1746 	for (i = GEN8_3LVL_PDPES; i--; ) {
1747 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1748 		u32 base = engine->mmio_base;
1749 
1750 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1751 		*cs++ = upper_32_bits(pd_daddr);
1752 		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1753 		*cs++ = lower_32_bits(pd_daddr);
1754 	}
1755 	*cs++ = MI_NOOP;
1756 
1757 	intel_ring_advance(rq, cs);
1758 
1759 	/* Be doubly sure the LRI have landed before proceeding */
1760 	err = engine->emit_flush(rq, EMIT_FLUSH);
1761 	if (err)
1762 		return err;
1763 
1764 	/* Re-invalidate the TLB for luck */
1765 	return engine->emit_flush(rq, EMIT_INVALIDATE);
1766 }
1767 
1768 static int execlists_request_alloc(struct i915_request *request)
1769 {
1770 	int ret;
1771 
1772 	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
1773 
1774 	/*
1775 	 * Flush enough space to reduce the likelihood of waiting after
1776 	 * we start building the request - in which case we will just
1777 	 * have to repeat work.
1778 	 */
1779 	request->reserved_space += EXECLISTS_REQUEST_SIZE;
1780 
1781 	/*
1782 	 * Note that after this point, we have committed to using
1783 	 * this request as it is being used to both track the
1784 	 * state of engine initialisation and liveness of the
1785 	 * golden renderstate above. Think twice before you try
1786 	 * to cancel/unwind this request now.
1787 	 */
1788 
1789 	/* Unconditionally invalidate GPU caches and TLBs. */
1790 	if (i915_vm_is_4lvl(request->hw_context->vm))
1791 		ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1792 	else
1793 		ret = emit_pdps(request);
1794 	if (ret)
1795 		return ret;
1796 
1797 	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
1798 	return 0;
1799 }
1800 
1801 /*
1802  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the
1803  * PIPE_CONTROL instruction. This is required for the flush to happen correctly,
1804  * but there is a slight complication as this is applied in a WA batch where the
1805  * values are only initialized once, so we cannot read the register value at the
1806  * beginning and reuse it later; hence we save its value to memory, upload a
1807  * constant value with bit 21 set and then restore it from the saved value.
1808  * To simplify the WA, a constant value is formed by using the default value
1809  * of this register. This shouldn't be a problem because we are only modifying
1810  * it for a short period and this batch is non-preemptible. We could of course
1811  * use additional instructions that read the actual value of the register
1812  * at that time and set our bit of interest, but that makes the WA more complicated.
1813  *
1814  * This WA is also required for Gen9, so extracting it as a function avoids
1815  * code duplication.
1816  */
1817 static u32 *
1818 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
1819 {
1820 	/* NB no one else is allowed to scribble over scratch + 256!
 */
1821 	*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1822 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1823 	*batch++ = intel_gt_scratch_offset(engine->gt,
1824 					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
1825 	*batch++ = 0;
1826 
1827 	*batch++ = MI_LOAD_REGISTER_IMM(1);
1828 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1829 	*batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
1830 
1831 	batch = gen8_emit_pipe_control(batch,
1832 				       PIPE_CONTROL_CS_STALL |
1833 				       PIPE_CONTROL_DC_FLUSH_ENABLE,
1834 				       0);
1835 
1836 	*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1837 	*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1838 	*batch++ = intel_gt_scratch_offset(engine->gt,
1839 					   INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
1840 	*batch++ = 0;
1841 
1842 	return batch;
1843 }
1844 
1845 static u32 slm_offset(struct intel_engine_cs *engine)
1846 {
1847 	return intel_gt_scratch_offset(engine->gt,
1848 				       INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
1849 }
1850 
1851 /*
1852  * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1853  * initialized at the beginning and shared across all contexts but this field
1854  * helps us to have multiple batches at different offsets and select them based
1855  * on a criterion. At the moment this batch always starts at the beginning of the
1856  * page and at this point we don't have multiple wa_ctx batch buffers.
1857  *
1858  * The number of WAs applied is not known at the beginning; we use this field
1859  * to return the number of DWORDs written.
1860  *
1861  * Note that this batch does not contain MI_BATCH_BUFFER_END,
1862  * so it adds NOOPs as padding to make it cacheline aligned.
1863  * MI_BATCH_BUFFER_END will be added to the per-ctx batch and the two together
1864  * make a complete batch buffer.
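 *
 * For illustration, the offset/size pair recorded for each batch by
 * intel_init_workaround_bb() below is what execlists_init_reg_state()
 * later folds into the context image, roughly:
 *
 *   regs[CTX_RCS_INDIRECT_CTX + 1] =
 *           (ggtt_offset + wa_ctx->indirect_ctx.offset) |
 *           (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
 *   regs[CTX_BB_PER_CTX_PTR + 1] =
 *           (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
 *
 * The indirect_ctx size must be expressed in cachelines, which is why the
 * generators below pad with MI_NOOP up to a cacheline boundary.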
1865 */ 1866 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 1867 { 1868 /* WaDisableCtxRestoreArbitration:bdw,chv */ 1869 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 1870 1871 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1872 if (IS_BROADWELL(engine->i915)) 1873 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1874 1875 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1876 /* Actual scratch location is at 128 bytes offset */ 1877 batch = gen8_emit_pipe_control(batch, 1878 PIPE_CONTROL_FLUSH_L3 | 1879 PIPE_CONTROL_GLOBAL_GTT_IVB | 1880 PIPE_CONTROL_CS_STALL | 1881 PIPE_CONTROL_QW_WRITE, 1882 slm_offset(engine)); 1883 1884 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 1885 1886 /* Pad to end of cacheline */ 1887 while ((unsigned long)batch % CACHELINE_BYTES) 1888 *batch++ = MI_NOOP; 1889 1890 /* 1891 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 1892 * execution depends on the length specified in terms of cache lines 1893 * in the register CTX_RCS_INDIRECT_CTX 1894 */ 1895 1896 return batch; 1897 } 1898 1899 struct lri { 1900 i915_reg_t reg; 1901 u32 value; 1902 }; 1903 1904 static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count) 1905 { 1906 GEM_BUG_ON(!count || count > 63); 1907 1908 *batch++ = MI_LOAD_REGISTER_IMM(count); 1909 do { 1910 *batch++ = i915_mmio_reg_offset(lri->reg); 1911 *batch++ = lri->value; 1912 } while (lri++, --count); 1913 *batch++ = MI_NOOP; 1914 1915 return batch; 1916 } 1917 1918 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 1919 { 1920 static const struct lri lri[] = { 1921 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 1922 { 1923 COMMON_SLICE_CHICKEN2, 1924 __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE, 1925 0), 1926 }, 1927 1928 /* BSpec: 11391 */ 1929 { 1930 FF_SLICE_CHICKEN, 1931 __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX, 1932 FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX), 1933 }, 1934 1935 /* BSpec: 11299 */ 1936 { 1937 _3D_CHICKEN3, 1938 __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX, 1939 _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX), 1940 } 1941 }; 1942 1943 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 1944 1945 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 1946 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1947 1948 batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); 1949 1950 /* WaMediaPoolStateCmdInWABB:bxt,glk */ 1951 if (HAS_POOLED_EU(engine->i915)) { 1952 /* 1953 * EU pool configuration is setup along with golden context 1954 * during context initialization. This value depends on 1955 * device type (2x6 or 3x6) and needs to be updated based 1956 * on which subslice is disabled especially for 2x6 1957 * devices, however it is safe to load default 1958 * configuration of 3x6 device instead of masking off 1959 * corresponding bits because HW ignores bits of a disabled 1960 * subslice and drops down to appropriate config. Please 1961 * see render_state_setup() in i915_gem_render_state.c for 1962 * possible configurations, to avoid duplication they are 1963 * not shown here again. 
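 * For what it is worth, the GEN9_MEDIA_POOL_STATE payload emitted just
 * below (0x00777000 followed by three zero dwords) is taken to be that
 * safe default 3x6 pool configuration.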
1964 */ 1965 *batch++ = GEN9_MEDIA_POOL_STATE; 1966 *batch++ = GEN9_MEDIA_POOL_ENABLE; 1967 *batch++ = 0x00777000; 1968 *batch++ = 0; 1969 *batch++ = 0; 1970 *batch++ = 0; 1971 } 1972 1973 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 1974 1975 /* Pad to end of cacheline */ 1976 while ((unsigned long)batch % CACHELINE_BYTES) 1977 *batch++ = MI_NOOP; 1978 1979 return batch; 1980 } 1981 1982 static u32 * 1983 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 1984 { 1985 int i; 1986 1987 /* 1988 * WaPipeControlBefore3DStateSamplePattern: cnl 1989 * 1990 * Ensure the engine is idle prior to programming a 1991 * 3DSTATE_SAMPLE_PATTERN during a context restore. 1992 */ 1993 batch = gen8_emit_pipe_control(batch, 1994 PIPE_CONTROL_CS_STALL, 1995 0); 1996 /* 1997 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for 1998 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in 1999 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is 2000 * confusing. Since gen8_emit_pipe_control() already advances the 2001 * batch by 6 dwords, we advance the other 10 here, completing a 2002 * cacheline. It's not clear if the workaround requires this padding 2003 * before other commands, or if it's just the regular padding we would 2004 * already have for the workaround bb, so leave it here for now. 2005 */ 2006 for (i = 0; i < 10; i++) 2007 *batch++ = MI_NOOP; 2008 2009 /* Pad to end of cacheline */ 2010 while ((unsigned long)batch % CACHELINE_BYTES) 2011 *batch++ = MI_NOOP; 2012 2013 return batch; 2014 } 2015 2016 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) 2017 2018 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) 2019 { 2020 struct drm_i915_gem_object *obj; 2021 struct i915_vma *vma; 2022 int err; 2023 2024 obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); 2025 if (IS_ERR(obj)) 2026 return PTR_ERR(obj); 2027 2028 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); 2029 if (IS_ERR(vma)) { 2030 err = PTR_ERR(vma); 2031 goto err; 2032 } 2033 2034 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 2035 if (err) 2036 goto err; 2037 2038 engine->wa_ctx.vma = vma; 2039 return 0; 2040 2041 err: 2042 i915_gem_object_put(obj); 2043 return err; 2044 } 2045 2046 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) 2047 { 2048 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); 2049 } 2050 2051 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); 2052 2053 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 2054 { 2055 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 2056 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, 2057 &wa_ctx->per_ctx }; 2058 wa_bb_func_t wa_bb_fn[2]; 2059 struct page *page; 2060 void *batch, *batch_ptr; 2061 unsigned int i; 2062 int ret; 2063 2064 if (engine->class != RENDER_CLASS) 2065 return 0; 2066 2067 switch (INTEL_GEN(engine->i915)) { 2068 case 11: 2069 return 0; 2070 case 10: 2071 wa_bb_fn[0] = gen10_init_indirectctx_bb; 2072 wa_bb_fn[1] = NULL; 2073 break; 2074 case 9: 2075 wa_bb_fn[0] = gen9_init_indirectctx_bb; 2076 wa_bb_fn[1] = NULL; 2077 break; 2078 case 8: 2079 wa_bb_fn[0] = gen8_init_indirectctx_bb; 2080 wa_bb_fn[1] = NULL; 2081 break; 2082 default: 2083 MISSING_CASE(INTEL_GEN(engine->i915)); 2084 return 0; 2085 } 2086 2087 ret = lrc_setup_wa_ctx(engine); 2088 if (ret) { 2089 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 2090 return ret; 2091 } 2092 2093 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); 2094 
batch = batch_ptr = kmap_atomic(page); 2095 2096 /* 2097 * Emit the two workaround batch buffers, recording the offset from the 2098 * start of the workaround batch buffer object for each and their 2099 * respective sizes. 2100 */ 2101 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { 2102 wa_bb[i]->offset = batch_ptr - batch; 2103 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, 2104 CACHELINE_BYTES))) { 2105 ret = -EINVAL; 2106 break; 2107 } 2108 if (wa_bb_fn[i]) 2109 batch_ptr = wa_bb_fn[i](engine, batch_ptr); 2110 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); 2111 } 2112 2113 BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); 2114 2115 kunmap_atomic(batch); 2116 if (ret) 2117 lrc_destroy_wa_ctx(engine); 2118 2119 return ret; 2120 } 2121 2122 static void enable_execlists(struct intel_engine_cs *engine) 2123 { 2124 u32 mode; 2125 2126 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); 2127 2128 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ 2129 2130 if (INTEL_GEN(engine->i915) >= 11) 2131 mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); 2132 else 2133 mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); 2134 ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); 2135 2136 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 2137 2138 ENGINE_WRITE_FW(engine, 2139 RING_HWS_PGA, 2140 i915_ggtt_offset(engine->status_page.vma)); 2141 ENGINE_POSTING_READ(engine, RING_HWS_PGA); 2142 } 2143 2144 static bool unexpected_starting_state(struct intel_engine_cs *engine) 2145 { 2146 bool unexpected = false; 2147 2148 if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { 2149 DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); 2150 unexpected = true; 2151 } 2152 2153 return unexpected; 2154 } 2155 2156 static int execlists_resume(struct intel_engine_cs *engine) 2157 { 2158 intel_engine_apply_workarounds(engine); 2159 intel_engine_apply_whitelist(engine); 2160 2161 intel_mocs_init_engine(engine); 2162 2163 intel_engine_reset_breadcrumbs(engine); 2164 2165 if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { 2166 struct drm_printer p = drm_debug_printer(__func__); 2167 2168 intel_engine_dump(engine, &p, NULL); 2169 } 2170 2171 enable_execlists(engine); 2172 2173 return 0; 2174 } 2175 2176 static void execlists_reset_prepare(struct intel_engine_cs *engine) 2177 { 2178 struct intel_engine_execlists * const execlists = &engine->execlists; 2179 unsigned long flags; 2180 2181 GEM_TRACE("%s: depth<-%d\n", engine->name, 2182 atomic_read(&execlists->tasklet.count)); 2183 2184 /* 2185 * Prevent request submission to the hardware until we have 2186 * completed the reset in i915_gem_reset_finish(). If a request 2187 * is completed by one engine, it may then queue a request 2188 * to a second via its execlists->tasklet *just* as we are 2189 * calling engine->resume() and also writing the ELSP. 2190 * Turning off the execlists->tasklet until the reset is over 2191 * prevents the race. 2192 */ 2193 __tasklet_disable_sync_once(&execlists->tasklet); 2194 GEM_BUG_ON(!reset_in_progress(execlists)); 2195 2196 /* And flush any current direct submission. */ 2197 spin_lock_irqsave(&engine->active.lock, flags); 2198 spin_unlock_irqrestore(&engine->active.lock, flags); 2199 2200 /* 2201 * We stop engines, otherwise we might get failed reset and a 2202 * dead gpu (on elk). Also as modern gpu as kbl can suffer 2203 * from system hang if batchbuffer is progressing when 2204 * the reset is issued, regardless of READY_TO_RESET ack. 
2205 * Thus assume it is best to stop engines on all gens 2206 * where we have a gpu reset. 2207 * 2208 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) 2209 * 2210 * FIXME: Wa for more modern gens needs to be validated 2211 */ 2212 intel_engine_stop_cs(engine); 2213 } 2214 2215 static void reset_csb_pointers(struct intel_engine_cs *engine) 2216 { 2217 struct intel_engine_execlists * const execlists = &engine->execlists; 2218 const unsigned int reset_value = execlists->csb_size - 1; 2219 2220 ring_set_paused(engine, 0); 2221 2222 /* 2223 * After a reset, the HW starts writing into CSB entry [0]. We 2224 * therefore have to set our HEAD pointer back one entry so that 2225 * the *first* entry we check is entry 0. To complicate this further, 2226 * as we don't wait for the first interrupt after reset, we have to 2227 * fake the HW write to point back to the last entry so that our 2228 * inline comparison of our cached head position against the last HW 2229 * write works even before the first interrupt. 2230 */ 2231 execlists->csb_head = reset_value; 2232 WRITE_ONCE(*execlists->csb_write, reset_value); 2233 wmb(); /* Make sure this is visible to HW (paranoia?) */ 2234 2235 invalidate_csb_entries(&execlists->csb_status[0], 2236 &execlists->csb_status[reset_value]); 2237 } 2238 2239 static struct i915_request *active_request(struct i915_request *rq) 2240 { 2241 const struct list_head * const list = &rq->engine->active.requests; 2242 const struct intel_context * const context = rq->hw_context; 2243 struct i915_request *active = NULL; 2244 2245 list_for_each_entry_from_reverse(rq, list, sched.link) { 2246 if (i915_request_completed(rq)) 2247 break; 2248 2249 if (rq->hw_context != context) 2250 break; 2251 2252 active = rq; 2253 } 2254 2255 return active; 2256 } 2257 2258 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) 2259 { 2260 struct intel_engine_execlists * const execlists = &engine->execlists; 2261 struct intel_context *ce; 2262 struct i915_request *rq; 2263 u32 *regs; 2264 2265 process_csb(engine); /* drain preemption events */ 2266 2267 /* Following the reset, we need to reload the CSB read/write pointers */ 2268 reset_csb_pointers(engine); 2269 2270 /* 2271 * Save the currently executing context, even if we completed 2272 * its request, it was still running at the time of the 2273 * reset and will have been clobbered. 2274 */ 2275 rq = execlists_active(execlists); 2276 if (!rq) 2277 goto unwind; 2278 2279 ce = rq->hw_context; 2280 GEM_BUG_ON(i915_active_is_idle(&ce->active)); 2281 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 2282 rq = active_request(rq); 2283 2284 /* 2285 * Catch up with any missed context-switch interrupts. 2286 * 2287 * Ideally we would just read the remaining CSB entries now that we 2288 * know the gpu is idle. However, the CSB registers are sometimes^W 2289 * often trashed across a GPU reset! Instead we have to rely on 2290 * guessing the missed context-switch events by looking at what 2291 * requests were completed. 2292 */ 2293 execlists_cancel_port_requests(execlists); 2294 2295 if (!rq) { 2296 ce->ring->head = ce->ring->tail; 2297 goto out_replay; 2298 } 2299 2300 ce->ring->head = intel_ring_wrap(ce->ring, rq->head); 2301 2302 /* 2303 * If this request hasn't started yet, e.g. it is waiting on a 2304 * semaphore, we need to avoid skipping the request or else we 2305 * break the signaling chain. However, if the context is corrupt 2306 * the request will not restart and we will be stuck with a wedged 2307 * device. 
It is quite often the case that if we issue a reset 2308 * while the GPU is loading the context image, that the context 2309 * image becomes corrupt. 2310 * 2311 * Otherwise, if we have not started yet, the request should replay 2312 * perfectly and we do not need to flag the result as being erroneous. 2313 */ 2314 if (!i915_request_started(rq)) 2315 goto out_replay; 2316 2317 /* 2318 * If the request was innocent, we leave the request in the ELSP 2319 * and will try to replay it on restarting. The context image may 2320 * have been corrupted by the reset, in which case we may have 2321 * to service a new GPU hang, but more likely we can continue on 2322 * without impact. 2323 * 2324 * If the request was guilty, we presume the context is corrupt 2325 * and have to at least restore the RING register in the context 2326 * image back to the expected values to skip over the guilty request. 2327 */ 2328 __i915_request_reset(rq, stalled); 2329 if (!stalled) 2330 goto out_replay; 2331 2332 /* 2333 * We want a simple context + ring to execute the breadcrumb update. 2334 * We cannot rely on the context being intact across the GPU hang, 2335 * so clear it and rebuild just what we need for the breadcrumb. 2336 * All pending requests for this context will be zapped, and any 2337 * future request will be after userspace has had the opportunity 2338 * to recreate its own state. 2339 */ 2340 regs = ce->lrc_reg_state; 2341 if (engine->pinned_default_state) { 2342 memcpy(regs, /* skip restoring the vanilla PPHWSP */ 2343 engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, 2344 engine->context_size - PAGE_SIZE); 2345 } 2346 execlists_init_reg_state(regs, ce, engine, ce->ring); 2347 2348 out_replay: 2349 GEM_TRACE("%s replay {head:%04x, tail:%04x\n", 2350 engine->name, ce->ring->head, ce->ring->tail); 2351 intel_ring_update_space(ce->ring); 2352 __execlists_update_reg_state(ce, engine); 2353 2354 unwind: 2355 /* Push back any incomplete requests for replay after the reset. */ 2356 __unwind_incomplete_requests(engine); 2357 } 2358 2359 static void execlists_reset(struct intel_engine_cs *engine, bool stalled) 2360 { 2361 unsigned long flags; 2362 2363 GEM_TRACE("%s\n", engine->name); 2364 2365 spin_lock_irqsave(&engine->active.lock, flags); 2366 2367 __execlists_reset(engine, stalled); 2368 2369 spin_unlock_irqrestore(&engine->active.lock, flags); 2370 } 2371 2372 static void nop_submission_tasklet(unsigned long data) 2373 { 2374 /* The driver is wedged; don't process any more events. */ 2375 } 2376 2377 static void execlists_cancel_requests(struct intel_engine_cs *engine) 2378 { 2379 struct intel_engine_execlists * const execlists = &engine->execlists; 2380 struct i915_request *rq, *rn; 2381 struct rb_node *rb; 2382 unsigned long flags; 2383 2384 GEM_TRACE("%s\n", engine->name); 2385 2386 /* 2387 * Before we call engine->cancel_requests(), we should have exclusive 2388 * access to the submission state. This is arranged for us by the 2389 * caller disabling the interrupt generation, the tasklet and other 2390 * threads that may then access the same state, giving us a free hand 2391 * to reset state. However, we still need to let lockdep be aware that 2392 * we know this state may be accessed in hardirq context, so we 2393 * disable the irq around this manipulation and we want to keep 2394 * the spinlock focused on its duties and not accidentally conflate 2395 * coverage to the submission's irq state. 
(Similarly, although we 2396 * shouldn't need to disable irq around the manipulation of the 2397 * submission's irq state, we also wish to remind ourselves that 2398 * it is irq state.) 2399 */ 2400 spin_lock_irqsave(&engine->active.lock, flags); 2401 2402 __execlists_reset(engine, true); 2403 2404 /* Mark all executing requests as skipped. */ 2405 list_for_each_entry(rq, &engine->active.requests, sched.link) { 2406 if (!i915_request_signaled(rq)) 2407 dma_fence_set_error(&rq->fence, -EIO); 2408 2409 i915_request_mark_complete(rq); 2410 } 2411 2412 /* Flush the queued requests to the timeline list (for retiring). */ 2413 while ((rb = rb_first_cached(&execlists->queue))) { 2414 struct i915_priolist *p = to_priolist(rb); 2415 int i; 2416 2417 priolist_for_each_request_consume(rq, rn, p, i) { 2418 list_del_init(&rq->sched.link); 2419 __i915_request_submit(rq); 2420 dma_fence_set_error(&rq->fence, -EIO); 2421 i915_request_mark_complete(rq); 2422 } 2423 2424 rb_erase_cached(&p->node, &execlists->queue); 2425 i915_priolist_free(p); 2426 } 2427 2428 /* Cancel all attached virtual engines */ 2429 while ((rb = rb_first_cached(&execlists->virtual))) { 2430 struct virtual_engine *ve = 2431 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 2432 2433 rb_erase_cached(rb, &execlists->virtual); 2434 RB_CLEAR_NODE(rb); 2435 2436 spin_lock(&ve->base.active.lock); 2437 if (ve->request) { 2438 ve->request->engine = engine; 2439 __i915_request_submit(ve->request); 2440 dma_fence_set_error(&ve->request->fence, -EIO); 2441 i915_request_mark_complete(ve->request); 2442 ve->base.execlists.queue_priority_hint = INT_MIN; 2443 ve->request = NULL; 2444 } 2445 spin_unlock(&ve->base.active.lock); 2446 } 2447 2448 /* Remaining _unready_ requests will be nop'ed when submitted */ 2449 2450 execlists->queue_priority_hint = INT_MIN; 2451 execlists->queue = RB_ROOT_CACHED; 2452 2453 GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); 2454 execlists->tasklet.func = nop_submission_tasklet; 2455 2456 spin_unlock_irqrestore(&engine->active.lock, flags); 2457 } 2458 2459 static void execlists_reset_finish(struct intel_engine_cs *engine) 2460 { 2461 struct intel_engine_execlists * const execlists = &engine->execlists; 2462 2463 /* 2464 * After a GPU reset, we may have requests to replay. Do so now while 2465 * we still have the forcewake to be sure that the GPU is not allowed 2466 * to sleep before we restart and reload a context. 2467 */ 2468 GEM_BUG_ON(!reset_in_progress(execlists)); 2469 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) 2470 execlists->tasklet.func(execlists->tasklet.data); 2471 2472 if (__tasklet_enable(&execlists->tasklet)) 2473 /* And kick in case we missed a new request submission. */ 2474 tasklet_hi_schedule(&execlists->tasklet); 2475 GEM_TRACE("%s: depth->%d\n", engine->name, 2476 atomic_read(&execlists->tasklet.count)); 2477 } 2478 2479 static int gen8_emit_bb_start(struct i915_request *rq, 2480 u64 offset, u32 len, 2481 const unsigned int flags) 2482 { 2483 u32 *cs; 2484 2485 cs = intel_ring_begin(rq, 4); 2486 if (IS_ERR(cs)) 2487 return PTR_ERR(cs); 2488 2489 /* 2490 * WaDisableCtxRestoreArbitration:bdw,chv 2491 * 2492 * We don't need to perform MI_ARB_ENABLE as often as we do (in 2493 * particular all the gen that do not need the w/a at all!), if we 2494 * took care to make sure that on every switch into this context 2495 * (both ordinary and for preemption) that arbitrartion was enabled 2496 * we would be fine. 
However, for gen8 there is another w/a that 2497 * requires us to not preempt inside GPGPU execution, so we keep 2498 * arbitration disabled for gen8 batches. Arbitration will be 2499 * re-enabled before we close the request 2500 * (engine->emit_fini_breadcrumb). 2501 */ 2502 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 2503 2504 /* FIXME(BDW+): Address space and security selectors. */ 2505 *cs++ = MI_BATCH_BUFFER_START_GEN8 | 2506 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); 2507 *cs++ = lower_32_bits(offset); 2508 *cs++ = upper_32_bits(offset); 2509 2510 intel_ring_advance(rq, cs); 2511 2512 return 0; 2513 } 2514 2515 static int gen9_emit_bb_start(struct i915_request *rq, 2516 u64 offset, u32 len, 2517 const unsigned int flags) 2518 { 2519 u32 *cs; 2520 2521 cs = intel_ring_begin(rq, 6); 2522 if (IS_ERR(cs)) 2523 return PTR_ERR(cs); 2524 2525 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 2526 2527 *cs++ = MI_BATCH_BUFFER_START_GEN8 | 2528 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); 2529 *cs++ = lower_32_bits(offset); 2530 *cs++ = upper_32_bits(offset); 2531 2532 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 2533 *cs++ = MI_NOOP; 2534 2535 intel_ring_advance(rq, cs); 2536 2537 return 0; 2538 } 2539 2540 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) 2541 { 2542 ENGINE_WRITE(engine, RING_IMR, 2543 ~(engine->irq_enable_mask | engine->irq_keep_mask)); 2544 ENGINE_POSTING_READ(engine, RING_IMR); 2545 } 2546 2547 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) 2548 { 2549 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); 2550 } 2551 2552 static int gen8_emit_flush(struct i915_request *request, u32 mode) 2553 { 2554 u32 cmd, *cs; 2555 2556 cs = intel_ring_begin(request, 4); 2557 if (IS_ERR(cs)) 2558 return PTR_ERR(cs); 2559 2560 cmd = MI_FLUSH_DW + 1; 2561 2562 /* We always require a command barrier so that subsequent 2563 * commands, such as breadcrumb interrupts, are strictly ordered 2564 * wrt the contents of the write cache being flushed to memory 2565 * (and thus being coherent from the CPU). 
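 *
 * Concretely, the barrier takes the form of a post-sync write: the
 * MI_FLUSH_DW built below requests a dword write (STORE_INDEX/OP_STOREDW)
 * into the scratch slot of the status page (I915_GEM_HWS_SCRATCH_ADDR,
 * addressed via the GGTT), and the command does not complete until both
 * the flush and that write have been performed.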
2566 */ 2567 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2568 2569 if (mode & EMIT_INVALIDATE) { 2570 cmd |= MI_INVALIDATE_TLB; 2571 if (request->engine->class == VIDEO_DECODE_CLASS) 2572 cmd |= MI_INVALIDATE_BSD; 2573 } 2574 2575 *cs++ = cmd; 2576 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 2577 *cs++ = 0; /* upper addr */ 2578 *cs++ = 0; /* value */ 2579 intel_ring_advance(request, cs); 2580 2581 return 0; 2582 } 2583 2584 static int gen8_emit_flush_render(struct i915_request *request, 2585 u32 mode) 2586 { 2587 struct intel_engine_cs *engine = request->engine; 2588 u32 scratch_addr = 2589 intel_gt_scratch_offset(engine->gt, 2590 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); 2591 bool vf_flush_wa = false, dc_flush_wa = false; 2592 u32 *cs, flags = 0; 2593 int len; 2594 2595 flags |= PIPE_CONTROL_CS_STALL; 2596 2597 if (mode & EMIT_FLUSH) { 2598 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 2599 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 2600 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 2601 flags |= PIPE_CONTROL_FLUSH_ENABLE; 2602 } 2603 2604 if (mode & EMIT_INVALIDATE) { 2605 flags |= PIPE_CONTROL_TLB_INVALIDATE; 2606 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 2607 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 2608 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 2609 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 2610 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 2611 flags |= PIPE_CONTROL_QW_WRITE; 2612 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 2613 2614 /* 2615 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 2616 * pipe control. 2617 */ 2618 if (IS_GEN(request->i915, 9)) 2619 vf_flush_wa = true; 2620 2621 /* WaForGAMHang:kbl */ 2622 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) 2623 dc_flush_wa = true; 2624 } 2625 2626 len = 6; 2627 2628 if (vf_flush_wa) 2629 len += 6; 2630 2631 if (dc_flush_wa) 2632 len += 12; 2633 2634 cs = intel_ring_begin(request, len); 2635 if (IS_ERR(cs)) 2636 return PTR_ERR(cs); 2637 2638 if (vf_flush_wa) 2639 cs = gen8_emit_pipe_control(cs, 0, 0); 2640 2641 if (dc_flush_wa) 2642 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, 2643 0); 2644 2645 cs = gen8_emit_pipe_control(cs, flags, scratch_addr); 2646 2647 if (dc_flush_wa) 2648 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); 2649 2650 intel_ring_advance(request, cs); 2651 2652 return 0; 2653 } 2654 2655 /* 2656 * Reserve space for 2 NOOPs at the end of each request to be 2657 * used as a workaround for not being allowed to do lite 2658 * restore with HEAD==TAIL (WaIdleLiteRestore). 2659 */ 2660 static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) 2661 { 2662 /* Ensure there's always at least one preemption point per-request. 
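 * The MI_ARB_CHECK emitted below doubles as that preemption point; with
 * the trailing MI_NOOP it forms the two-dword WaIdleLiteRestore padding
 * described above, and wa_tail records the ring offset just past it.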
*/ 2663 *cs++ = MI_ARB_CHECK; 2664 *cs++ = MI_NOOP; 2665 request->wa_tail = intel_ring_offset(request, cs); 2666 2667 return cs; 2668 } 2669 2670 static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) 2671 { 2672 *cs++ = MI_SEMAPHORE_WAIT | 2673 MI_SEMAPHORE_GLOBAL_GTT | 2674 MI_SEMAPHORE_POLL | 2675 MI_SEMAPHORE_SAD_EQ_SDD; 2676 *cs++ = 0; 2677 *cs++ = intel_hws_preempt_address(request->engine); 2678 *cs++ = 0; 2679 2680 return cs; 2681 } 2682 2683 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) 2684 { 2685 cs = gen8_emit_ggtt_write(cs, 2686 request->fence.seqno, 2687 request->timeline->hwsp_offset, 2688 0); 2689 *cs++ = MI_USER_INTERRUPT; 2690 2691 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 2692 if (intel_engine_has_semaphores(request->engine)) 2693 cs = emit_preempt_busywait(request, cs); 2694 2695 request->tail = intel_ring_offset(request, cs); 2696 assert_ring_tail_valid(request->ring, request->tail); 2697 2698 return gen8_emit_wa_tail(request, cs); 2699 } 2700 2701 static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 2702 { 2703 /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ 2704 cs = gen8_emit_ggtt_write_rcs(cs, 2705 request->fence.seqno, 2706 request->timeline->hwsp_offset, 2707 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 2708 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 2709 PIPE_CONTROL_DC_FLUSH_ENABLE); 2710 cs = gen8_emit_pipe_control(cs, 2711 PIPE_CONTROL_FLUSH_ENABLE | 2712 PIPE_CONTROL_CS_STALL, 2713 0); 2714 *cs++ = MI_USER_INTERRUPT; 2715 2716 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 2717 if (intel_engine_has_semaphores(request->engine)) 2718 cs = emit_preempt_busywait(request, cs); 2719 2720 request->tail = intel_ring_offset(request, cs); 2721 assert_ring_tail_valid(request->ring, request->tail); 2722 2723 return gen8_emit_wa_tail(request, cs); 2724 } 2725 2726 static void execlists_park(struct intel_engine_cs *engine) 2727 { 2728 del_timer_sync(&engine->execlists.timer); 2729 intel_engine_park(engine); 2730 } 2731 2732 void intel_execlists_set_default_submission(struct intel_engine_cs *engine) 2733 { 2734 engine->submit_request = execlists_submit_request; 2735 engine->cancel_requests = execlists_cancel_requests; 2736 engine->schedule = i915_schedule; 2737 engine->execlists.tasklet.func = execlists_submission_tasklet; 2738 2739 engine->reset.prepare = execlists_reset_prepare; 2740 engine->reset.reset = execlists_reset; 2741 engine->reset.finish = execlists_reset_finish; 2742 2743 engine->park = execlists_park; 2744 engine->unpark = NULL; 2745 2746 engine->flags |= I915_ENGINE_SUPPORTS_STATS; 2747 if (!intel_vgpu_active(engine->i915)) { 2748 engine->flags |= I915_ENGINE_HAS_SEMAPHORES; 2749 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) 2750 engine->flags |= I915_ENGINE_HAS_PREEMPTION; 2751 } 2752 } 2753 2754 static void execlists_destroy(struct intel_engine_cs *engine) 2755 { 2756 intel_engine_cleanup_common(engine); 2757 lrc_destroy_wa_ctx(engine); 2758 kfree(engine); 2759 } 2760 2761 static void 2762 logical_ring_default_vfuncs(struct intel_engine_cs *engine) 2763 { 2764 /* Default vfuncs which can be overriden by each engine. 
*/ 2765 2766 engine->destroy = execlists_destroy; 2767 engine->resume = execlists_resume; 2768 2769 engine->reset.prepare = execlists_reset_prepare; 2770 engine->reset.reset = execlists_reset; 2771 engine->reset.finish = execlists_reset_finish; 2772 2773 engine->cops = &execlists_context_ops; 2774 engine->request_alloc = execlists_request_alloc; 2775 2776 engine->emit_flush = gen8_emit_flush; 2777 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; 2778 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; 2779 2780 engine->set_default_submission = intel_execlists_set_default_submission; 2781 2782 if (INTEL_GEN(engine->i915) < 11) { 2783 engine->irq_enable = gen8_logical_ring_enable_irq; 2784 engine->irq_disable = gen8_logical_ring_disable_irq; 2785 } else { 2786 /* 2787 * TODO: On Gen11 interrupt masks need to be clear 2788 * to allow C6 entry. Keep interrupts enabled at 2789 * and take the hit of generating extra interrupts 2790 * until a more refined solution exists. 2791 */ 2792 } 2793 if (IS_GEN(engine->i915, 8)) 2794 engine->emit_bb_start = gen8_emit_bb_start; 2795 else 2796 engine->emit_bb_start = gen9_emit_bb_start; 2797 } 2798 2799 static inline void 2800 logical_ring_default_irqs(struct intel_engine_cs *engine) 2801 { 2802 unsigned int shift = 0; 2803 2804 if (INTEL_GEN(engine->i915) < 11) { 2805 const u8 irq_shifts[] = { 2806 [RCS0] = GEN8_RCS_IRQ_SHIFT, 2807 [BCS0] = GEN8_BCS_IRQ_SHIFT, 2808 [VCS0] = GEN8_VCS0_IRQ_SHIFT, 2809 [VCS1] = GEN8_VCS1_IRQ_SHIFT, 2810 [VECS0] = GEN8_VECS_IRQ_SHIFT, 2811 }; 2812 2813 shift = irq_shifts[engine->id]; 2814 } 2815 2816 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 2817 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 2818 } 2819 2820 int intel_execlists_submission_setup(struct intel_engine_cs *engine) 2821 { 2822 /* Intentionally left blank. 
*/ 2823 engine->buffer = NULL; 2824 2825 tasklet_init(&engine->execlists.tasklet, 2826 execlists_submission_tasklet, (unsigned long)engine); 2827 timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); 2828 2829 logical_ring_default_vfuncs(engine); 2830 logical_ring_default_irqs(engine); 2831 2832 if (engine->class == RENDER_CLASS) { 2833 engine->emit_flush = gen8_emit_flush_render; 2834 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; 2835 } 2836 2837 return 0; 2838 } 2839 2840 int intel_execlists_submission_init(struct intel_engine_cs *engine) 2841 { 2842 struct intel_engine_execlists * const execlists = &engine->execlists; 2843 struct drm_i915_private *i915 = engine->i915; 2844 struct intel_uncore *uncore = engine->uncore; 2845 u32 base = engine->mmio_base; 2846 int ret; 2847 2848 ret = intel_engine_init_common(engine); 2849 if (ret) 2850 return ret; 2851 2852 if (intel_init_workaround_bb(engine)) 2853 /* 2854 * We continue even if we fail to initialize WA batch 2855 * because we only expect rare glitches but nothing 2856 * critical to prevent us from using GPU 2857 */ 2858 DRM_ERROR("WA batch buffer initialization failed\n"); 2859 2860 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2861 execlists->submit_reg = uncore->regs + 2862 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); 2863 execlists->ctrl_reg = uncore->regs + 2864 i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); 2865 } else { 2866 execlists->submit_reg = uncore->regs + 2867 i915_mmio_reg_offset(RING_ELSP(base)); 2868 } 2869 2870 execlists->csb_status = 2871 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 2872 2873 execlists->csb_write = 2874 &engine->status_page.addr[intel_hws_csb_write_index(i915)]; 2875 2876 if (INTEL_GEN(i915) < 11) 2877 execlists->csb_size = GEN8_CSB_ENTRIES; 2878 else 2879 execlists->csb_size = GEN11_CSB_ENTRIES; 2880 2881 reset_csb_pointers(engine); 2882 2883 return 0; 2884 } 2885 2886 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) 2887 { 2888 u32 indirect_ctx_offset; 2889 2890 switch (INTEL_GEN(engine->i915)) { 2891 default: 2892 MISSING_CASE(INTEL_GEN(engine->i915)); 2893 /* fall through */ 2894 case 11: 2895 indirect_ctx_offset = 2896 GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2897 break; 2898 case 10: 2899 indirect_ctx_offset = 2900 GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2901 break; 2902 case 9: 2903 indirect_ctx_offset = 2904 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2905 break; 2906 case 8: 2907 indirect_ctx_offset = 2908 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2909 break; 2910 } 2911 2912 return indirect_ctx_offset; 2913 } 2914 2915 static void execlists_init_reg_state(u32 *regs, 2916 struct intel_context *ce, 2917 struct intel_engine_cs *engine, 2918 struct intel_ring *ring) 2919 { 2920 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); 2921 bool rcs = engine->class == RENDER_CLASS; 2922 u32 base = engine->mmio_base; 2923 2924 /* 2925 * A context is actually a big batch buffer with several 2926 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The 2927 * values we are setting here are only for the first context restore: 2928 * on a subsequent save, the GPU will recreate this batchbuffer with new 2929 * values (including all the missing MI_LOAD_REGISTER_IMM commands that 2930 * we are not initializing here). 2931 * 2932 * Must keep consistent with virtual_update_register_offsets(). 2933 */ 2934 regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 
14 : 11) | 2935 MI_LRI_FORCE_POSTED; 2936 2937 CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), 2938 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | 2939 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); 2940 if (INTEL_GEN(engine->i915) < 11) { 2941 regs[CTX_CONTEXT_CONTROL + 1] |= 2942 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | 2943 CTX_CTRL_RS_CTX_ENABLE); 2944 } 2945 CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0); 2946 CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0); 2947 CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0); 2948 CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base), 2949 RING_CTL_SIZE(ring->size) | RING_VALID); 2950 CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0); 2951 CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0); 2952 CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT); 2953 CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0); 2954 CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); 2955 CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); 2956 if (rcs) { 2957 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 2958 2959 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0); 2960 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET, 2961 RING_INDIRECT_CTX_OFFSET(base), 0); 2962 if (wa_ctx->indirect_ctx.size) { 2963 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); 2964 2965 regs[CTX_RCS_INDIRECT_CTX + 1] = 2966 (ggtt_offset + wa_ctx->indirect_ctx.offset) | 2967 (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); 2968 2969 regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] = 2970 intel_lr_indirect_ctx_offset(engine) << 6; 2971 } 2972 2973 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0); 2974 if (wa_ctx->per_ctx.size) { 2975 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); 2976 2977 regs[CTX_BB_PER_CTX_PTR + 1] = 2978 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; 2979 } 2980 } 2981 2982 regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; 2983 2984 CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); 2985 /* PDP values well be assigned later if needed */ 2986 CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0); 2987 CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0); 2988 CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0); 2989 CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0); 2990 CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0); 2991 CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0); 2992 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0); 2993 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0); 2994 2995 if (i915_vm_is_4lvl(&ppgtt->vm)) { 2996 /* 64b PPGTT (48bit canonical) 2997 * PDP0_DESCRIPTOR contains the base address to PML4 and 2998 * other PDP Descriptors are ignored. 
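 * Hence only the PDP0 pair is rewritten by ASSIGN_CTX_PML4() below with
 * the PML4 address, while PDP1-3 keep the zero values programmed above.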
2999 */ 3000 ASSIGN_CTX_PML4(ppgtt, regs); 3001 } else { 3002 ASSIGN_CTX_PDP(ppgtt, regs, 3); 3003 ASSIGN_CTX_PDP(ppgtt, regs, 2); 3004 ASSIGN_CTX_PDP(ppgtt, regs, 1); 3005 ASSIGN_CTX_PDP(ppgtt, regs, 0); 3006 } 3007 3008 if (rcs) { 3009 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 3010 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); 3011 } 3012 3013 regs[CTX_END] = MI_BATCH_BUFFER_END; 3014 if (INTEL_GEN(engine->i915) >= 10) 3015 regs[CTX_END] |= BIT(0); 3016 } 3017 3018 static int 3019 populate_lr_context(struct intel_context *ce, 3020 struct drm_i915_gem_object *ctx_obj, 3021 struct intel_engine_cs *engine, 3022 struct intel_ring *ring) 3023 { 3024 void *vaddr; 3025 u32 *regs; 3026 int ret; 3027 3028 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); 3029 if (IS_ERR(vaddr)) { 3030 ret = PTR_ERR(vaddr); 3031 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); 3032 return ret; 3033 } 3034 3035 if (engine->default_state) { 3036 /* 3037 * We only want to copy over the template context state; 3038 * skipping over the headers reserved for GuC communication, 3039 * leaving those as zero. 3040 */ 3041 const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE; 3042 void *defaults; 3043 3044 defaults = i915_gem_object_pin_map(engine->default_state, 3045 I915_MAP_WB); 3046 if (IS_ERR(defaults)) { 3047 ret = PTR_ERR(defaults); 3048 goto err_unpin_ctx; 3049 } 3050 3051 memcpy(vaddr + start, defaults + start, engine->context_size); 3052 i915_gem_object_unpin_map(engine->default_state); 3053 } 3054 3055 /* The second page of the context object contains some fields which must 3056 * be set up prior to the first execution. */ 3057 regs = vaddr + LRC_STATE_PN * PAGE_SIZE; 3058 execlists_init_reg_state(regs, ce, engine, ring); 3059 if (!engine->default_state) 3060 regs[CTX_CONTEXT_CONTROL + 1] |= 3061 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 3062 3063 ret = 0; 3064 err_unpin_ctx: 3065 __i915_gem_object_flush_map(ctx_obj, 3066 LRC_HEADER_PAGES * PAGE_SIZE, 3067 engine->context_size); 3068 i915_gem_object_unpin_map(ctx_obj); 3069 return ret; 3070 } 3071 3072 static struct intel_timeline * 3073 get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt) 3074 { 3075 if (ctx->timeline) 3076 return intel_timeline_get(ctx->timeline); 3077 else 3078 return intel_timeline_create(gt, NULL); 3079 } 3080 3081 static int execlists_context_deferred_alloc(struct intel_context *ce, 3082 struct intel_engine_cs *engine) 3083 { 3084 struct drm_i915_gem_object *ctx_obj; 3085 struct i915_vma *vma; 3086 u32 context_size; 3087 struct intel_ring *ring; 3088 struct intel_timeline *timeline; 3089 int ret; 3090 3091 if (ce->state) 3092 return 0; 3093 3094 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); 3095 3096 /* 3097 * Before the actual start of the context image, we insert a few pages 3098 * for our own use and for sharing with the GuC. 
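 * The register state the driver pokes at therefore lives at
 * LRC_STATE_PN * PAGE_SIZE into the object (see __execlists_context_pin()
 * and populate_lr_context()), and populate_lr_context() skips the first
 * LRC_HEADER_PAGES when copying the default state so that this header
 * area stays zeroed.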
3099 */ 3100 context_size += LRC_HEADER_PAGES * PAGE_SIZE; 3101 3102 ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); 3103 if (IS_ERR(ctx_obj)) 3104 return PTR_ERR(ctx_obj); 3105 3106 vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); 3107 if (IS_ERR(vma)) { 3108 ret = PTR_ERR(vma); 3109 goto error_deref_obj; 3110 } 3111 3112 timeline = get_timeline(ce->gem_context, engine->gt); 3113 if (IS_ERR(timeline)) { 3114 ret = PTR_ERR(timeline); 3115 goto error_deref_obj; 3116 } 3117 3118 ring = intel_engine_create_ring(engine, 3119 timeline, 3120 ce->gem_context->ring_size); 3121 intel_timeline_put(timeline); 3122 if (IS_ERR(ring)) { 3123 ret = PTR_ERR(ring); 3124 goto error_deref_obj; 3125 } 3126 3127 ret = populate_lr_context(ce, ctx_obj, engine, ring); 3128 if (ret) { 3129 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 3130 goto error_ring_free; 3131 } 3132 3133 ce->ring = ring; 3134 ce->state = vma; 3135 3136 return 0; 3137 3138 error_ring_free: 3139 intel_ring_put(ring); 3140 error_deref_obj: 3141 i915_gem_object_put(ctx_obj); 3142 return ret; 3143 } 3144 3145 static struct list_head *virtual_queue(struct virtual_engine *ve) 3146 { 3147 return &ve->base.execlists.default_priolist.requests[0]; 3148 } 3149 3150 static void virtual_context_destroy(struct kref *kref) 3151 { 3152 struct virtual_engine *ve = 3153 container_of(kref, typeof(*ve), context.ref); 3154 unsigned int n; 3155 3156 GEM_BUG_ON(!list_empty(virtual_queue(ve))); 3157 GEM_BUG_ON(ve->request); 3158 GEM_BUG_ON(ve->context.inflight); 3159 3160 for (n = 0; n < ve->num_siblings; n++) { 3161 struct intel_engine_cs *sibling = ve->siblings[n]; 3162 struct rb_node *node = &ve->nodes[sibling->id].rb; 3163 3164 if (RB_EMPTY_NODE(node)) 3165 continue; 3166 3167 spin_lock_irq(&sibling->active.lock); 3168 3169 /* Detachment is lazily performed in the execlists tasklet */ 3170 if (!RB_EMPTY_NODE(node)) 3171 rb_erase_cached(node, &sibling->execlists.virtual); 3172 3173 spin_unlock_irq(&sibling->active.lock); 3174 } 3175 GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); 3176 3177 if (ve->context.state) 3178 __execlists_context_fini(&ve->context); 3179 intel_context_fini(&ve->context); 3180 3181 kfree(ve->bonds); 3182 kfree(ve); 3183 } 3184 3185 static void virtual_engine_initial_hint(struct virtual_engine *ve) 3186 { 3187 int swp; 3188 3189 /* 3190 * Pick a random sibling on starting to help spread the load around. 3191 * 3192 * New contexts are typically created with exactly the same order 3193 * of siblings, and often started in batches. Due to the way we iterate 3194 * the array of sibling when submitting requests, sibling[0] is 3195 * prioritised for dequeuing. If we make sure that sibling[0] is fairly 3196 * randomised across the system, we also help spread the load by the 3197 * first engine we inspect being different each time. 3198 * 3199 * NB This does not force us to execute on this engine, it will just 3200 * typically be the first we inspect for submission. 
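 *
 * Once siblings[0] changes, the register offsets baked into the virtual
 * context image must follow it, which is why the swap below is paired
 * with virtual_update_register_offsets().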
3201 */ 3202 swp = prandom_u32_max(ve->num_siblings); 3203 if (!swp) 3204 return; 3205 3206 swap(ve->siblings[swp], ve->siblings[0]); 3207 virtual_update_register_offsets(ve->context.lrc_reg_state, 3208 ve->siblings[0]); 3209 } 3210 3211 static int virtual_context_pin(struct intel_context *ce) 3212 { 3213 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 3214 int err; 3215 3216 /* Note: we must use a real engine class for setting up reg state */ 3217 err = __execlists_context_pin(ce, ve->siblings[0]); 3218 if (err) 3219 return err; 3220 3221 virtual_engine_initial_hint(ve); 3222 return 0; 3223 } 3224 3225 static void virtual_context_enter(struct intel_context *ce) 3226 { 3227 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 3228 unsigned int n; 3229 3230 for (n = 0; n < ve->num_siblings; n++) 3231 intel_engine_pm_get(ve->siblings[n]); 3232 } 3233 3234 static void virtual_context_exit(struct intel_context *ce) 3235 { 3236 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 3237 unsigned int n; 3238 3239 for (n = 0; n < ve->num_siblings; n++) 3240 intel_engine_pm_put(ve->siblings[n]); 3241 } 3242 3243 static const struct intel_context_ops virtual_context_ops = { 3244 .pin = virtual_context_pin, 3245 .unpin = execlists_context_unpin, 3246 3247 .enter = virtual_context_enter, 3248 .exit = virtual_context_exit, 3249 3250 .destroy = virtual_context_destroy, 3251 }; 3252 3253 static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) 3254 { 3255 struct i915_request *rq; 3256 intel_engine_mask_t mask; 3257 3258 rq = READ_ONCE(ve->request); 3259 if (!rq) 3260 return 0; 3261 3262 /* The rq is ready for submission; rq->execution_mask is now stable. */ 3263 mask = rq->execution_mask; 3264 if (unlikely(!mask)) { 3265 /* Invalid selection, submit to a random engine in error */ 3266 i915_request_skip(rq, -ENODEV); 3267 mask = ve->siblings[0]->mask; 3268 } 3269 3270 GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n", 3271 ve->base.name, 3272 rq->fence.context, rq->fence.seqno, 3273 mask, ve->base.execlists.queue_priority_hint); 3274 3275 return mask; 3276 } 3277 3278 static void virtual_submission_tasklet(unsigned long data) 3279 { 3280 struct virtual_engine * const ve = (struct virtual_engine *)data; 3281 const int prio = ve->base.execlists.queue_priority_hint; 3282 intel_engine_mask_t mask; 3283 unsigned int n; 3284 3285 rcu_read_lock(); 3286 mask = virtual_submission_mask(ve); 3287 rcu_read_unlock(); 3288 if (unlikely(!mask)) 3289 return; 3290 3291 local_irq_disable(); 3292 for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) { 3293 struct intel_engine_cs *sibling = ve->siblings[n]; 3294 struct ve_node * const node = &ve->nodes[sibling->id]; 3295 struct rb_node **parent, *rb; 3296 bool first; 3297 3298 if (unlikely(!(mask & sibling->mask))) { 3299 if (!RB_EMPTY_NODE(&node->rb)) { 3300 spin_lock(&sibling->active.lock); 3301 rb_erase_cached(&node->rb, 3302 &sibling->execlists.virtual); 3303 RB_CLEAR_NODE(&node->rb); 3304 spin_unlock(&sibling->active.lock); 3305 } 3306 continue; 3307 } 3308 3309 spin_lock(&sibling->active.lock); 3310 3311 if (!RB_EMPTY_NODE(&node->rb)) { 3312 /* 3313 * Cheat and avoid rebalancing the tree if we can 3314 * reuse this node in situ. 
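 * Reuse is only safe when the node's position cannot change: either the
 * priority is unchanged, or the node is already leftmost and is only
 * becoming more important.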
3315 */ 3316 first = rb_first_cached(&sibling->execlists.virtual) == 3317 &node->rb; 3318 if (prio == node->prio || (prio > node->prio && first)) 3319 goto submit_engine; 3320 3321 rb_erase_cached(&node->rb, &sibling->execlists.virtual); 3322 } 3323 3324 rb = NULL; 3325 first = true; 3326 parent = &sibling->execlists.virtual.rb_root.rb_node; 3327 while (*parent) { 3328 struct ve_node *other; 3329 3330 rb = *parent; 3331 other = rb_entry(rb, typeof(*other), rb); 3332 if (prio > other->prio) { 3333 parent = &rb->rb_left; 3334 } else { 3335 parent = &rb->rb_right; 3336 first = false; 3337 } 3338 } 3339 3340 rb_link_node(&node->rb, rb, parent); 3341 rb_insert_color_cached(&node->rb, 3342 &sibling->execlists.virtual, 3343 first); 3344 3345 submit_engine: 3346 GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); 3347 node->prio = prio; 3348 if (first && prio > sibling->execlists.queue_priority_hint) { 3349 sibling->execlists.queue_priority_hint = prio; 3350 tasklet_hi_schedule(&sibling->execlists.tasklet); 3351 } 3352 3353 spin_unlock(&sibling->active.lock); 3354 } 3355 local_irq_enable(); 3356 } 3357 3358 static void virtual_submit_request(struct i915_request *rq) 3359 { 3360 struct virtual_engine *ve = to_virtual_engine(rq->engine); 3361 3362 GEM_TRACE("%s: rq=%llx:%lld\n", 3363 ve->base.name, 3364 rq->fence.context, 3365 rq->fence.seqno); 3366 3367 GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); 3368 3369 GEM_BUG_ON(ve->request); 3370 GEM_BUG_ON(!list_empty(virtual_queue(ve))); 3371 3372 ve->base.execlists.queue_priority_hint = rq_prio(rq); 3373 WRITE_ONCE(ve->request, rq); 3374 3375 list_move_tail(&rq->sched.link, virtual_queue(ve)); 3376 3377 tasklet_schedule(&ve->base.execlists.tasklet); 3378 } 3379 3380 static struct ve_bond * 3381 virtual_find_bond(struct virtual_engine *ve, 3382 const struct intel_engine_cs *master) 3383 { 3384 int i; 3385 3386 for (i = 0; i < ve->num_bonds; i++) { 3387 if (ve->bonds[i].master == master) 3388 return &ve->bonds[i]; 3389 } 3390 3391 return NULL; 3392 } 3393 3394 static void 3395 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) 3396 { 3397 struct virtual_engine *ve = to_virtual_engine(rq->engine); 3398 struct ve_bond *bond; 3399 3400 bond = virtual_find_bond(ve, to_request(signal)->engine); 3401 if (bond) { 3402 intel_engine_mask_t old, new, cmp; 3403 3404 cmp = READ_ONCE(rq->execution_mask); 3405 do { 3406 old = cmp; 3407 new = cmp & bond->sibling_mask; 3408 } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old); 3409 } 3410 } 3411 3412 struct intel_context * 3413 intel_execlists_create_virtual(struct i915_gem_context *ctx, 3414 struct intel_engine_cs **siblings, 3415 unsigned int count) 3416 { 3417 struct virtual_engine *ve; 3418 unsigned int n; 3419 int err; 3420 3421 if (count == 0) 3422 return ERR_PTR(-EINVAL); 3423 3424 if (count == 1) 3425 return intel_context_create(ctx, siblings[0]); 3426 3427 ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); 3428 if (!ve) 3429 return ERR_PTR(-ENOMEM); 3430 3431 ve->base.i915 = ctx->i915; 3432 ve->base.gt = siblings[0]->gt; 3433 ve->base.id = -1; 3434 ve->base.class = OTHER_CLASS; 3435 ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; 3436 ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; 3437 3438 /* 3439 * The decision on whether to submit a request using semaphores 3440 * depends on the saturated state of the engine. 
We only compute 3441 * this during HW submission of the request, and we need for this 3442 * state to be globally applied to all requests being submitted 3443 * to this engine. Virtual engines encompass more than one physical 3444 * engine and so we cannot accurately tell in advance if one of those 3445 * engines is already saturated and so cannot afford to use a semaphore 3446 * and be pessimized in priority for doing so -- if we are the only 3447 * context using semaphores after all other clients have stopped, we 3448 * will be starved on the saturated system. Such a global switch for 3449 * semaphores is less than ideal, but alas is the current compromise. 3450 */ 3451 ve->base.saturated = ALL_ENGINES; 3452 3453 snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); 3454 3455 intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); 3456 3457 intel_engine_init_execlists(&ve->base); 3458 3459 ve->base.cops = &virtual_context_ops; 3460 ve->base.request_alloc = execlists_request_alloc; 3461 3462 ve->base.schedule = i915_schedule; 3463 ve->base.submit_request = virtual_submit_request; 3464 ve->base.bond_execute = virtual_bond_execute; 3465 3466 INIT_LIST_HEAD(virtual_queue(ve)); 3467 ve->base.execlists.queue_priority_hint = INT_MIN; 3468 tasklet_init(&ve->base.execlists.tasklet, 3469 virtual_submission_tasklet, 3470 (unsigned long)ve); 3471 3472 intel_context_init(&ve->context, ctx, &ve->base); 3473 3474 for (n = 0; n < count; n++) { 3475 struct intel_engine_cs *sibling = siblings[n]; 3476 3477 GEM_BUG_ON(!is_power_of_2(sibling->mask)); 3478 if (sibling->mask & ve->base.mask) { 3479 DRM_DEBUG("duplicate %s entry in load balancer\n", 3480 sibling->name); 3481 err = -EINVAL; 3482 goto err_put; 3483 } 3484 3485 /* 3486 * The virtual engine implementation is tightly coupled to 3487 * the execlists backend -- we push out request directly 3488 * into a tree inside each physical engine. We could support 3489 * layering if we handle cloning of the requests and 3490 * submitting a copy into each backend. 3491 */ 3492 if (sibling->execlists.tasklet.func != 3493 execlists_submission_tasklet) { 3494 err = -ENODEV; 3495 goto err_put; 3496 } 3497 3498 GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); 3499 RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); 3500 3501 ve->siblings[ve->num_siblings++] = sibling; 3502 ve->base.mask |= sibling->mask; 3503 3504 /* 3505 * All physical engines must be compatible for their emission 3506 * functions (as we build the instructions during request 3507 * construction and do not alter them before submission 3508 * on the physical engine). We use the engine class as a guide 3509 * here, although that could be refined. 
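 * That is why the emit_bb_start/emit_flush/breadcrumb hooks are copied
 * wholesale from the first sibling below, and any later sibling of a
 * different class is rejected.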
		 */
		if (ve->base.class != OTHER_CLASS) {
			if (ve->base.class != sibling->class) {
				DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
					  sibling->class, ve->base.class);
				err = -EINVAL;
				goto err_put;
			}
			continue;
		}

		ve->base.class = sibling->class;
		ve->base.uabi_class = sibling->uabi_class;
		snprintf(ve->base.name, sizeof(ve->base.name),
			 "v%dx%d", ve->base.class, count);
		ve->base.context_size = sibling->context_size;

		ve->base.emit_bb_start = sibling->emit_bb_start;
		ve->base.emit_flush = sibling->emit_flush;
		ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
		ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
		ve->base.emit_fini_breadcrumb_dw =
			sibling->emit_fini_breadcrumb_dw;

		ve->base.flags = sibling->flags;
	}

	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;

	return &ve->context;

err_put:
	intel_context_put(&ve->context);
	return ERR_PTR(err);
}

struct intel_context *
intel_execlists_clone_virtual(struct i915_gem_context *ctx,
			      struct intel_engine_cs *src)
{
	struct virtual_engine *se = to_virtual_engine(src);
	struct intel_context *dst;

	dst = intel_execlists_create_virtual(ctx,
					     se->siblings,
					     se->num_siblings);
	if (IS_ERR(dst))
		return dst;

	if (se->num_bonds) {
		struct virtual_engine *de = to_virtual_engine(dst->engine);

		de->bonds = kmemdup(se->bonds,
				    sizeof(*se->bonds) * se->num_bonds,
				    GFP_KERNEL);
		if (!de->bonds) {
			intel_context_put(dst);
			return ERR_PTR(-ENOMEM);
		}

		de->num_bonds = se->num_bonds;
	}

	return dst;
}

int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
				     const struct intel_engine_cs *master,
				     const struct intel_engine_cs *sibling)
{
	struct virtual_engine *ve = to_virtual_engine(engine);
	struct ve_bond *bond;
	int n;

	/* Sanity check the sibling is part of the virtual engine */
	for (n = 0; n < ve->num_siblings; n++)
		if (sibling == ve->siblings[n])
			break;
	if (n == ve->num_siblings)
		return -EINVAL;

	bond = virtual_find_bond(ve, master);
	if (bond) {
		bond->sibling_mask |= sibling->mask;
		return 0;
	}

	bond = krealloc(ve->bonds,
			sizeof(*bond) * (ve->num_bonds + 1),
			GFP_KERNEL);
	if (!bond)
		return -ENOMEM;

	bond[ve->num_bonds].master = master;
	bond[ve->num_bonds].sibling_mask = sibling->mask;

	ve->bonds = bond;
	ve->num_bonds++;

	return 0;
}

void intel_execlists_show_requests(struct intel_engine_cs *engine,
				   struct drm_printer *m,
				   void (*show_request)(struct drm_printer *m,
							struct i915_request *rq,
							const char *prefix),
				   unsigned int max)
{
	const struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request *rq, *last;
	unsigned long flags;
	unsigned int count;
	struct rb_node *rb;

	spin_lock_irqsave(&engine->active.lock, flags);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (count++ < max - 1)
			show_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - max);
		}
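		/*
		 * Always print the last request seen so that the tail of a
		 * truncated listing remains visible.
		 */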
		show_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	if (execlists->queue_priority_hint != INT_MIN)
		drm_printf(m, "\t\tQueue priority hint: %d\n",
			   execlists->queue_priority_hint);
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		int i;

		priolist_for_each_request(rq, p, i) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tQ ");
	}

	last = NULL;
	count = 0;
	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		struct i915_request *rq = READ_ONCE(ve->request);

		if (rq) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tV ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d virtual requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tV ");
	}

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void intel_lr_context_reset(struct intel_engine_cs *engine,
			    struct intel_context *ce,
			    u32 head,
			    bool scrub)
{
	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be submitted after userspace has had the
	 * opportunity to recreate its own state.
	 */
	if (scrub) {
		u32 *regs = ce->lrc_reg_state;

		if (engine->pinned_default_state) {
			memcpy(regs, /* skip restoring the vanilla PPHWSP */
			       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
			       engine->context_size - PAGE_SIZE);
		}
		execlists_init_reg_state(regs, ce, engine, ce->ring);
	}

	/* Rerun the request; its payload has been neutered (if guilty). */
	ce->ring->head = head;
	intel_ring_update_space(ce->ring);

	__execlists_update_reg_state(ce, engine);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif
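
/*
 * Illustrative sketch (comment only, never compiled): roughly how a caller,
 * e.g. the set-engines uAPI path, might combine the virtual-engine entry
 * points above. The names @ctx, @siblings, @count and @master are
 * assumptions standing in for state the caller already owns.
 *
 *	struct intel_context *ce;
 *	int err;
 *
 *	ce = intel_execlists_create_virtual(ctx, siblings, count);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 * A bond may then limit which siblings are eligible to execute a request
 * that is bonded to a request submitted on @master:
 *
 *	err = intel_virtual_engine_attach_bond(ce->engine, master, siblings[0]);
 *	if (err) {
 *		intel_context_put(ce);
 *		return err;
 *	}
 */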