1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Ben Widawsky <ben@bwidawsk.net> 25 * Michel Thierry <michel.thierry@intel.com> 26 * Thomas Daniel <thomas.daniel@intel.com> 27 * Oscar Mateo <oscar.mateo@intel.com> 28 * 29 */ 30 31 /** 32 * DOC: Logical Rings, Logical Ring Contexts and Execlists 33 * 34 * Motivation: 35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". 36 * These expanded contexts enable a number of new abilities, especially 37 * "Execlists" (also implemented in this file). 38 * 39 * One of the main differences with the legacy HW contexts is that logical 40 * ring contexts incorporate many more things to the context's state, like 41 * PDPs or ringbuffer control registers: 42 * 43 * The reason why PDPs are included in the context is straightforward: as 44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs 45 * contained there mean you don't need to do a ppgtt->switch_mm yourself, 46 * instead, the GPU will do it for you on the context switch. 47 * 48 * But, what about the ringbuffer control registers (head, tail, etc..)? 49 * shouldn't we just need a set of those per engine command streamer? This is 50 * where the name "Logical Rings" starts to make sense: by virtualizing the 51 * rings, the engine cs shifts to a new "ring buffer" with every context 52 * switch. When you want to submit a workload to the GPU you: A) choose your 53 * context, B) find its appropriate virtualized ring, C) write commands to it 54 * and then, finally, D) tell the GPU to switch to that context. 55 * 56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch 57 * to a contexts is via a context execution list, ergo "Execlists". 58 * 59 * LRC implementation: 60 * Regarding the creation of contexts, we have: 61 * 62 * - One global default context. 63 * - One local default context for each opened fd. 64 * - One local extra context for each context create ioctl call. 65 * 66 * Now that ringbuffers belong per-context (and not per-engine, like before) 67 * and that contexts are uniquely tied to a given engine (and not reusable, 68 * like before) we need: 69 * 70 * - One ringbuffer per-engine inside each context. 71 * - One backing object per-engine inside each context. 72 * 73 * The global default context starts its life with these new objects fully 74 * allocated and populated. 
The local default context for each opened fd is 75 * more complex, because we don't know at creation time which engine is going 76 * to use them. To handle this, we have implemented a deferred creation of LR 77 * contexts: 78 * 79 * The local context starts its life as a hollow or blank holder, that only 80 * gets populated for a given engine once we receive an execbuffer. If later 81 * on we receive another execbuffer ioctl for the same context but a different 82 * engine, we allocate/populate a new ringbuffer and context backing object and 83 * so on. 84 * 85 * Finally, regarding local contexts created using the ioctl call: as they are 86 * only allowed with the render ring, we can allocate & populate them right 87 * away (no need to defer anything, at least for now). 88 * 89 * Execlists implementation: 90 * Execlists are the new method by which, on gen8+ hardware, workloads are 91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method). 92 * This method works as follows: 93 * 94 * When a request is committed, its commands (the BB start and any leading or 95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer 96 * for the appropriate context. The tail pointer in the hardware context is not 97 * updated at this time, but instead, kept by the driver in the ringbuffer 98 * structure. A structure representing this request is added to a request queue 99 * for the appropriate engine: this structure contains a copy of the context's 100 * tail after the request was written to the ring buffer and a pointer to the 101 * context itself. 102 * 103 * If the engine's request queue was empty before the request was added, the 104 * queue is processed immediately. Otherwise the queue will be processed during 105 * a context switch interrupt. In any case, elements on the queue will get sent 106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a 107 * globally unique 20-bits submission ID. 108 * 109 * When execution of a request completes, the GPU updates the context status 110 * buffer with a context complete event and generates a context switch interrupt. 111 * During the interrupt handling, the driver examines the events in the buffer: 112 * for each context complete event, if the announced ID matches that on the head 113 * of the request queue, then that request is retired and removed from the queue. 114 * 115 * After processing, if any requests were retired and the queue is not empty 116 * then a new execution list can be submitted. The two requests at the front of 117 * the queue are next to be submitted but since a context may not occur twice in 118 * an execution list, if subsequent requests have the same ID as the first then 119 * the two requests must be combined. This is done simply by discarding requests 120 * at the head of the queue until either only one requests is left (in which case 121 * we use a NULL second context) or the first two requests have unique IDs. 122 * 123 * By always executing the first two requests in the queue the driver ensures 124 * that the GPU is kept as busy as possible. In the case where a single context 125 * completes but a second context is still executing, the request for this second 126 * context will be at the head of the queue when we remove the first one. 
This 127 * request will then be resubmitted along with a new request for a different context, 128 * which will cause the hardware to continue executing the second request and queue 129 * the new request (the GPU detects the condition of a context getting preempted 130 * with the same context and optimizes the context switch flow by not doing 131 * preemption, but just sampling the new tail pointer). 132 * 133 */ 134 #include <linux/interrupt.h> 135 136 #include "gem/i915_gem_context.h" 137 138 #include "i915_drv.h" 139 #include "i915_gem_render_state.h" 140 #include "i915_vgpu.h" 141 #include "intel_engine_pm.h" 142 #include "intel_lrc_reg.h" 143 #include "intel_mocs.h" 144 #include "intel_reset.h" 145 #include "intel_workarounds.h" 146 147 #define RING_EXECLIST_QFULL (1 << 0x2) 148 #define RING_EXECLIST1_VALID (1 << 0x3) 149 #define RING_EXECLIST0_VALID (1 << 0x4) 150 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) 151 #define RING_EXECLIST1_ACTIVE (1 << 0x11) 152 #define RING_EXECLIST0_ACTIVE (1 << 0x12) 153 154 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) 155 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1) 156 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) 157 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) 158 #define GEN8_CTX_STATUS_COMPLETE (1 << 4) 159 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) 160 161 #define GEN8_CTX_STATUS_COMPLETED_MASK \ 162 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) 163 164 /* Typical size of the average request (2 pipecontrols and a MI_BB) */ 165 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 166 #define WA_TAIL_DWORDS 2 167 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) 168 169 struct virtual_engine { 170 struct intel_engine_cs base; 171 struct intel_context context; 172 173 /* 174 * We allow only a single request through the virtual engine at a time 175 * (each request in the timeline waits for the completion fence of 176 * the previous before being submitted). By restricting ourselves to 177 * only submitting a single request, each request is placed on to a 178 * physical to maximise load spreading (by virtue of the late greedy 179 * scheduling -- each real engine takes the next available request 180 * upon idling). 181 */ 182 struct i915_request *request; 183 184 /* 185 * We keep a rbtree of available virtual engines inside each physical 186 * engine, sorted by priority. Here we preallocate the nodes we need 187 * for the virtual engine, indexed by physical_engine->id. 188 */ 189 struct ve_node { 190 struct rb_node rb; 191 int prio; 192 } nodes[I915_NUM_ENGINES]; 193 194 /* 195 * Keep track of bonded pairs -- restrictions upon on our selection 196 * of physical engines any particular request may be submitted to. 197 * If we receive a submit-fence from a master engine, we will only 198 * use one of sibling_mask physical engines. 199 */ 200 struct ve_bond { 201 const struct intel_engine_cs *master; 202 intel_engine_mask_t sibling_mask; 203 } *bonds; 204 unsigned int num_bonds; 205 206 /* And finally, which physical engines this virtual engine maps onto. 
*/ 207 unsigned int num_siblings; 208 struct intel_engine_cs *siblings[0]; 209 }; 210 211 static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) 212 { 213 GEM_BUG_ON(!intel_engine_is_virtual(engine)); 214 return container_of(engine, struct virtual_engine, base); 215 } 216 217 static int execlists_context_deferred_alloc(struct intel_context *ce, 218 struct intel_engine_cs *engine); 219 static void execlists_init_reg_state(u32 *reg_state, 220 struct intel_context *ce, 221 struct intel_engine_cs *engine, 222 struct intel_ring *ring); 223 224 static inline struct i915_priolist *to_priolist(struct rb_node *rb) 225 { 226 return rb_entry(rb, struct i915_priolist, node); 227 } 228 229 static inline int rq_prio(const struct i915_request *rq) 230 { 231 return rq->sched.attr.priority; 232 } 233 234 static int effective_prio(const struct i915_request *rq) 235 { 236 int prio = rq_prio(rq); 237 238 /* 239 * On unwinding the active request, we give it a priority bump 240 * if it has completed waiting on any semaphore. If we know that 241 * the request has already started, we can prevent an unwanted 242 * preempt-to-idle cycle by taking that into account now. 243 */ 244 if (__i915_request_has_started(rq)) 245 prio |= I915_PRIORITY_NOSEMAPHORE; 246 247 /* Restrict mere WAIT boosts from triggering preemption */ 248 return prio | __NO_PREEMPTION; 249 } 250 251 static int queue_prio(const struct intel_engine_execlists *execlists) 252 { 253 struct i915_priolist *p; 254 struct rb_node *rb; 255 256 rb = rb_first_cached(&execlists->queue); 257 if (!rb) 258 return INT_MIN; 259 260 /* 261 * As the priolist[] are inverted, with the highest priority in [0], 262 * we have to flip the index value to become priority. 263 */ 264 p = to_priolist(rb); 265 return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); 266 } 267 268 static inline bool need_preempt(const struct intel_engine_cs *engine, 269 const struct i915_request *rq, 270 struct rb_node *rb) 271 { 272 int last_prio; 273 274 if (!engine->preempt_context) 275 return false; 276 277 if (i915_request_completed(rq)) 278 return false; 279 280 /* 281 * Check if the current priority hint merits a preemption attempt. 282 * 283 * We record the highest value priority we saw during rescheduling 284 * prior to this dequeue, therefore we know that if it is strictly 285 * less than the current tail of ESLP[0], we do not need to force 286 * a preempt-to-idle cycle. 287 * 288 * However, the priority hint is a mere hint that we may need to 289 * preempt. If that hint is stale or we may be trying to preempt 290 * ourselves, ignore the request. 291 */ 292 last_prio = effective_prio(rq); 293 if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint, 294 last_prio)) 295 return false; 296 297 /* 298 * Check against the first request in ELSP[1], it will, thanks to the 299 * power of PI, be the highest priority of that context. 
300 */ 301 if (!list_is_last(&rq->link, &engine->timeline.requests) && 302 rq_prio(list_next_entry(rq, link)) > last_prio) 303 return true; 304 305 if (rb) { 306 struct virtual_engine *ve = 307 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 308 bool preempt = false; 309 310 if (engine == ve->siblings[0]) { /* only preempt one sibling */ 311 struct i915_request *next; 312 313 rcu_read_lock(); 314 next = READ_ONCE(ve->request); 315 if (next) 316 preempt = rq_prio(next) > last_prio; 317 rcu_read_unlock(); 318 } 319 320 if (preempt) 321 return preempt; 322 } 323 324 /* 325 * If the inflight context did not trigger the preemption, then maybe 326 * it was the set of queued requests? Pick the highest priority in 327 * the queue (the first active priolist) and see if it deserves to be 328 * running instead of ELSP[0]. 329 * 330 * The highest priority request in the queue can not be either 331 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same 332 * context, it's priority would not exceed ELSP[0] aka last_prio. 333 */ 334 return queue_prio(&engine->execlists) > last_prio; 335 } 336 337 __maybe_unused static inline bool 338 assert_priority_queue(const struct i915_request *prev, 339 const struct i915_request *next) 340 { 341 const struct intel_engine_execlists *execlists = 342 &prev->engine->execlists; 343 344 /* 345 * Without preemption, the prev may refer to the still active element 346 * which we refuse to let go. 347 * 348 * Even with preemption, there are times when we think it is better not 349 * to preempt and leave an ostensibly lower priority request in flight. 350 */ 351 if (port_request(execlists->port) == prev) 352 return true; 353 354 return rq_prio(prev) >= rq_prio(next); 355 } 356 357 /* 358 * The context descriptor encodes various attributes of a context, 359 * including its GTT address and some flags. Because it's fairly 360 * expensive to calculate, we'll just do it once and cache the result, 361 * which remains valid until the context is unpinned. 362 * 363 * This is what a descriptor looks like, from LSB to MSB:: 364 * 365 * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template) 366 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 367 * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC) 368 * bits 53-54: mbz, reserved for use by hardware 369 * bits 55-63: group ID, currently unused and set to 0 370 * 371 * Starting from Gen11, the upper dword of the descriptor has a new format: 372 * 373 * bits 32-36: reserved 374 * bits 37-47: SW context ID 375 * bits 48:53: engine instance 376 * bit 54: mbz, reserved for use by hardware 377 * bits 55-60: SW counter 378 * bits 61-63: engine class 379 * 380 * engine info, SW context ID and SW counter need to form a unique number 381 * (Context ID) per lrc. 382 */ 383 static u64 384 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) 385 { 386 struct i915_gem_context *ctx = ce->gem_context; 387 u64 desc; 388 389 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); 390 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH))); 391 392 desc = ctx->desc_template; /* bits 0-11 */ 393 GEM_BUG_ON(desc & GENMASK_ULL(63, 12)); 394 395 desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; 396 /* bits 12-31 */ 397 GEM_BUG_ON(desc & GENMASK_ULL(63, 32)); 398 399 /* 400 * The following 32bits are copied into the OA reports (dword 2). 401 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing 402 * anything below. 
403 */ 404 if (INTEL_GEN(engine->i915) >= 11) { 405 GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); 406 desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; 407 /* bits 37-47 */ 408 409 desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; 410 /* bits 48-53 */ 411 412 /* TODO: decide what to do with SW counter (bits 55-60) */ 413 414 desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; 415 /* bits 61-63 */ 416 } else { 417 GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH)); 418 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ 419 } 420 421 return desc; 422 } 423 424 static void unwind_wa_tail(struct i915_request *rq) 425 { 426 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); 427 assert_ring_tail_valid(rq->ring, rq->tail); 428 } 429 430 static struct i915_request * 431 __unwind_incomplete_requests(struct intel_engine_cs *engine) 432 { 433 struct i915_request *rq, *rn, *active = NULL; 434 struct list_head *uninitialized_var(pl); 435 int prio = I915_PRIORITY_INVALID; 436 437 lockdep_assert_held(&engine->timeline.lock); 438 439 list_for_each_entry_safe_reverse(rq, rn, 440 &engine->timeline.requests, 441 link) { 442 struct intel_engine_cs *owner; 443 444 if (i915_request_completed(rq)) 445 break; 446 447 __i915_request_unsubmit(rq); 448 unwind_wa_tail(rq); 449 450 GEM_BUG_ON(rq->hw_context->active); 451 452 /* 453 * Push the request back into the queue for later resubmission. 454 * If this request is not native to this physical engine (i.e. 455 * it came from a virtual source), push it back onto the virtual 456 * engine so that it can be moved across onto another physical 457 * engine as load dictates. 458 */ 459 owner = rq->hw_context->engine; 460 if (likely(owner == engine)) { 461 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 462 if (rq_prio(rq) != prio) { 463 prio = rq_prio(rq); 464 pl = i915_sched_lookup_priolist(engine, prio); 465 } 466 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 467 468 list_add(&rq->sched.link, pl); 469 active = rq; 470 } else { 471 rq->engine = owner; 472 owner->submit_request(rq); 473 active = NULL; 474 } 475 } 476 477 return active; 478 } 479 480 struct i915_request * 481 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) 482 { 483 struct intel_engine_cs *engine = 484 container_of(execlists, typeof(*engine), execlists); 485 486 return __unwind_incomplete_requests(engine); 487 } 488 489 static inline void 490 execlists_context_status_change(struct i915_request *rq, unsigned long status) 491 { 492 /* 493 * Only used when GVT-g is enabled now. When GVT-g is disabled, 494 * The compiler should eliminate this function as dead-code. 
495 */ 496 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 497 return; 498 499 atomic_notifier_call_chain(&rq->engine->context_status_notifier, 500 status, rq); 501 } 502 503 inline void 504 execlists_user_begin(struct intel_engine_execlists *execlists, 505 const struct execlist_port *port) 506 { 507 execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER); 508 } 509 510 inline void 511 execlists_user_end(struct intel_engine_execlists *execlists) 512 { 513 execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); 514 } 515 516 static inline void 517 execlists_context_schedule_in(struct i915_request *rq) 518 { 519 GEM_BUG_ON(rq->hw_context->active); 520 521 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 522 intel_engine_context_in(rq->engine); 523 rq->hw_context->active = rq->engine; 524 } 525 526 static void kick_siblings(struct i915_request *rq) 527 { 528 struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine); 529 struct i915_request *next = READ_ONCE(ve->request); 530 531 if (next && next->execution_mask & ~rq->execution_mask) 532 tasklet_schedule(&ve->base.execlists.tasklet); 533 } 534 535 static inline void 536 execlists_context_schedule_out(struct i915_request *rq, unsigned long status) 537 { 538 rq->hw_context->active = NULL; 539 intel_engine_context_out(rq->engine); 540 execlists_context_status_change(rq, status); 541 trace_i915_request_out(rq); 542 543 /* 544 * If this is part of a virtual engine, its next request may have 545 * been blocked waiting for access to the active context. We have 546 * to kick all the siblings again in case we need to switch (e.g. 547 * the next request is not runnable on this engine). Hopefully, 548 * we will already have submitted the next request before the 549 * tasklet runs and do not need to rebuild each virtual tree 550 * and kick everyone again. 551 */ 552 if (rq->engine != rq->hw_context->engine) 553 kick_siblings(rq); 554 } 555 556 static u64 execlists_update_context(struct i915_request *rq) 557 { 558 struct intel_context *ce = rq->hw_context; 559 560 ce->lrc_reg_state[CTX_RING_TAIL + 1] = 561 intel_ring_set_tail(rq->ring, rq->tail); 562 563 /* 564 * Make sure the context image is complete before we submit it to HW. 565 * 566 * Ostensibly, writes (including the WCB) should be flushed prior to 567 * an uncached write such as our mmio register access, the empirical 568 * evidence (esp. on Braswell) suggests that the WC write into memory 569 * may not be visible to the HW prior to the completion of the UC 570 * register write and that we may begin execution from the context 571 * before its image is complete leading to invalid PD chasing. 572 * 573 * Furthermore, Braswell, at least, wants a full mb to be sure that 574 * the writes are coherent in memory (visible to the GPU) prior to 575 * execution, and not just visible to other CPUs (as is the result of 576 * wmb). 
577 */ 578 mb(); 579 return ce->lrc_desc; 580 } 581 582 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) 583 { 584 if (execlists->ctrl_reg) { 585 writel(lower_32_bits(desc), execlists->submit_reg + port * 2); 586 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); 587 } else { 588 writel(upper_32_bits(desc), execlists->submit_reg); 589 writel(lower_32_bits(desc), execlists->submit_reg); 590 } 591 } 592 593 static void execlists_submit_ports(struct intel_engine_cs *engine) 594 { 595 struct intel_engine_execlists *execlists = &engine->execlists; 596 struct execlist_port *port = execlists->port; 597 unsigned int n; 598 599 /* 600 * We can skip acquiring intel_runtime_pm_get() here as it was taken 601 * on our behalf by the request (see i915_gem_mark_busy()) and it will 602 * not be relinquished until the device is idle (see 603 * i915_gem_idle_work_handler()). As a precaution, we make sure 604 * that all ELSP are drained i.e. we have processed the CSB, 605 * before allowing ourselves to idle and calling intel_runtime_pm_put(). 606 */ 607 GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref)); 608 609 /* 610 * ELSQ note: the submit queue is not cleared after being submitted 611 * to the HW so we need to make sure we always clean it up. This is 612 * currently ensured by the fact that we always write the same number 613 * of elsq entries, keep this in mind before changing the loop below. 614 */ 615 for (n = execlists_num_ports(execlists); n--; ) { 616 struct i915_request *rq; 617 unsigned int count; 618 u64 desc; 619 620 rq = port_unpack(&port[n], &count); 621 if (rq) { 622 GEM_BUG_ON(count > !n); 623 if (!count++) 624 execlists_context_schedule_in(rq); 625 port_set(&port[n], port_pack(rq, count)); 626 desc = execlists_update_context(rq); 627 GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); 628 629 GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", 630 engine->name, n, 631 port[n].context_id, count, 632 rq->fence.context, rq->fence.seqno, 633 hwsp_seqno(rq), 634 rq_prio(rq)); 635 } else { 636 GEM_BUG_ON(!n); 637 desc = 0; 638 } 639 640 write_desc(execlists, desc, n); 641 } 642 643 /* we need to manually load the submit queue */ 644 if (execlists->ctrl_reg) 645 writel(EL_CTRL_LOAD, execlists->ctrl_reg); 646 647 execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); 648 } 649 650 static bool ctx_single_port_submission(const struct intel_context *ce) 651 { 652 return (IS_ENABLED(CONFIG_DRM_I915_GVT) && 653 i915_gem_context_force_single_submission(ce->gem_context)); 654 } 655 656 static bool can_merge_ctx(const struct intel_context *prev, 657 const struct intel_context *next) 658 { 659 if (prev != next) 660 return false; 661 662 if (ctx_single_port_submission(prev)) 663 return false; 664 665 return true; 666 } 667 668 static bool can_merge_rq(const struct i915_request *prev, 669 const struct i915_request *next) 670 { 671 GEM_BUG_ON(!assert_priority_queue(prev, next)); 672 673 if (!can_merge_ctx(prev->hw_context, next->hw_context)) 674 return false; 675 676 return true; 677 } 678 679 static void port_assign(struct execlist_port *port, struct i915_request *rq) 680 { 681 GEM_BUG_ON(rq == port_request(port)); 682 683 if (port_isset(port)) 684 i915_request_put(port_request(port)); 685 686 port_set(port, port_pack(i915_request_get(rq), port_count(port))); 687 } 688 689 static void inject_preempt_context(struct intel_engine_cs *engine) 690 { 691 struct intel_engine_execlists *execlists = &engine->execlists; 692 
struct intel_context *ce = engine->preempt_context; 693 unsigned int n; 694 695 GEM_BUG_ON(execlists->preempt_complete_status != 696 upper_32_bits(ce->lrc_desc)); 697 698 /* 699 * Switch to our empty preempt context so 700 * the state of the GPU is known (idle). 701 */ 702 GEM_TRACE("%s\n", engine->name); 703 for (n = execlists_num_ports(execlists); --n; ) 704 write_desc(execlists, 0, n); 705 706 write_desc(execlists, ce->lrc_desc, n); 707 708 /* we need to manually load the submit queue */ 709 if (execlists->ctrl_reg) 710 writel(EL_CTRL_LOAD, execlists->ctrl_reg); 711 712 execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK); 713 execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT); 714 715 (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); 716 } 717 718 static void complete_preempt_context(struct intel_engine_execlists *execlists) 719 { 720 GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)); 721 722 if (inject_preempt_hang(execlists)) 723 return; 724 725 execlists_cancel_port_requests(execlists); 726 __unwind_incomplete_requests(container_of(execlists, 727 struct intel_engine_cs, 728 execlists)); 729 } 730 731 static void virtual_update_register_offsets(u32 *regs, 732 struct intel_engine_cs *engine) 733 { 734 u32 base = engine->mmio_base; 735 736 /* Must match execlists_init_reg_state()! */ 737 738 regs[CTX_CONTEXT_CONTROL] = 739 i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base)); 740 regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base)); 741 regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base)); 742 regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base)); 743 regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base)); 744 745 regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base)); 746 regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base)); 747 regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base)); 748 regs[CTX_SECOND_BB_HEAD_U] = 749 i915_mmio_reg_offset(RING_SBBADDR_UDW(base)); 750 regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base)); 751 regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base)); 752 753 regs[CTX_CTX_TIMESTAMP] = 754 i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base)); 755 regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3)); 756 regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3)); 757 regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2)); 758 regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2)); 759 regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1)); 760 regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1)); 761 regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 762 regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 763 764 if (engine->class == RENDER_CLASS) { 765 regs[CTX_RCS_INDIRECT_CTX] = 766 i915_mmio_reg_offset(RING_INDIRECT_CTX(base)); 767 regs[CTX_RCS_INDIRECT_CTX_OFFSET] = 768 i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base)); 769 regs[CTX_BB_PER_CTX_PTR] = 770 i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base)); 771 772 regs[CTX_R_PWR_CLK_STATE] = 773 i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); 774 } 775 } 776 777 static bool virtual_matches(const struct virtual_engine *ve, 778 const struct i915_request *rq, 779 const struct intel_engine_cs *engine) 780 { 781 const struct intel_engine_cs *active; 782 783 if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! 
*/ 784 return false; 785 786 /* 787 * We track when the HW has completed saving the context image 788 * (i.e. when we have seen the final CS event switching out of 789 * the context) and must not overwrite the context image before 790 * then. This restricts us to only using the active engine 791 * while the previous virtualized request is inflight (so 792 * we reuse the register offsets). This is a very small 793 * hystersis on the greedy seelction algorithm. 794 */ 795 active = READ_ONCE(ve->context.active); 796 if (active && active != engine) 797 return false; 798 799 return true; 800 } 801 802 static void virtual_xfer_breadcrumbs(struct virtual_engine *ve, 803 struct intel_engine_cs *engine) 804 { 805 struct intel_engine_cs *old = ve->siblings[0]; 806 807 /* All unattached (rq->engine == old) must already be completed */ 808 809 spin_lock(&old->breadcrumbs.irq_lock); 810 if (!list_empty(&ve->context.signal_link)) { 811 list_move_tail(&ve->context.signal_link, 812 &engine->breadcrumbs.signalers); 813 intel_engine_queue_breadcrumbs(engine); 814 } 815 spin_unlock(&old->breadcrumbs.irq_lock); 816 } 817 818 static void execlists_dequeue(struct intel_engine_cs *engine) 819 { 820 struct intel_engine_execlists * const execlists = &engine->execlists; 821 struct execlist_port *port = execlists->port; 822 const struct execlist_port * const last_port = 823 &execlists->port[execlists->port_mask]; 824 struct i915_request *last = port_request(port); 825 struct rb_node *rb; 826 bool submit = false; 827 828 /* 829 * Hardware submission is through 2 ports. Conceptually each port 830 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 831 * static for a context, and unique to each, so we only execute 832 * requests belonging to a single context from each ring. RING_HEAD 833 * is maintained by the CS in the context image, it marks the place 834 * where it got up to last time, and through RING_TAIL we tell the CS 835 * where we want to execute up to this time. 836 * 837 * In this list the requests are in order of execution. Consecutive 838 * requests from the same context are adjacent in the ringbuffer. We 839 * can combine these requests into a single RING_TAIL update: 840 * 841 * RING_HEAD...req1...req2 842 * ^- RING_TAIL 843 * since to execute req2 the CS must first execute req1. 844 * 845 * Our goal then is to point each port to the end of a consecutive 846 * sequence of requests as being the most optimal (fewest wake ups 847 * and context switches) submission. 848 */ 849 850 for (rb = rb_first_cached(&execlists->virtual); rb; ) { 851 struct virtual_engine *ve = 852 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 853 struct i915_request *rq = READ_ONCE(ve->request); 854 855 if (!rq) { /* lazily cleanup after another engine handled rq */ 856 rb_erase_cached(rb, &execlists->virtual); 857 RB_CLEAR_NODE(rb); 858 rb = rb_first_cached(&execlists->virtual); 859 continue; 860 } 861 862 if (!virtual_matches(ve, rq, engine)) { 863 rb = rb_next(rb); 864 continue; 865 } 866 867 break; 868 } 869 870 if (last) { 871 /* 872 * Don't resubmit or switch until all outstanding 873 * preemptions (lite-restore) are seen. Then we 874 * know the next preemption status we see corresponds 875 * to this ELSP update. 876 */ 877 GEM_BUG_ON(!execlists_is_active(execlists, 878 EXECLISTS_ACTIVE_USER)); 879 GEM_BUG_ON(!port_count(&port[0])); 880 881 /* 882 * If we write to ELSP a second time before the HW has had 883 * a chance to respond to the previous write, we can confuse 884 * the HW and hit "undefined behaviour". 
After writing to ELSP, 885 * we must then wait until we see a context-switch event from 886 * the HW to indicate that it has had a chance to respond. 887 */ 888 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) 889 return; 890 891 if (need_preempt(engine, last, rb)) { 892 inject_preempt_context(engine); 893 return; 894 } 895 896 /* 897 * In theory, we could coalesce more requests onto 898 * the second port (the first port is active, with 899 * no preemptions pending). However, that means we 900 * then have to deal with the possible lite-restore 901 * of the second port (as we submit the ELSP, there 902 * may be a context-switch) but also we may complete 903 * the resubmission before the context-switch. Ergo, 904 * coalescing onto the second port will cause a 905 * preemption event, but we cannot predict whether 906 * that will affect port[0] or port[1]. 907 * 908 * If the second port is already active, we can wait 909 * until the next context-switch before contemplating 910 * new requests. The GPU will be busy and we should be 911 * able to resubmit the new ELSP before it idles, 912 * avoiding pipeline bubbles (momentary pauses where 913 * the driver is unable to keep up the supply of new 914 * work). However, we have to double check that the 915 * priorities of the ports haven't been switch. 916 */ 917 if (port_count(&port[1])) 918 return; 919 920 /* 921 * WaIdleLiteRestore:bdw,skl 922 * Apply the wa NOOPs to prevent 923 * ring:HEAD == rq:TAIL as we resubmit the 924 * request. See gen8_emit_fini_breadcrumb() for 925 * where we prepare the padding after the 926 * end of the request. 927 */ 928 last->tail = last->wa_tail; 929 } 930 931 while (rb) { /* XXX virtual is always taking precedence */ 932 struct virtual_engine *ve = 933 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 934 struct i915_request *rq; 935 936 spin_lock(&ve->base.timeline.lock); 937 938 rq = ve->request; 939 if (unlikely(!rq)) { /* lost the race to a sibling */ 940 spin_unlock(&ve->base.timeline.lock); 941 rb_erase_cached(rb, &execlists->virtual); 942 RB_CLEAR_NODE(rb); 943 rb = rb_first_cached(&execlists->virtual); 944 continue; 945 } 946 947 GEM_BUG_ON(rq != ve->request); 948 GEM_BUG_ON(rq->engine != &ve->base); 949 GEM_BUG_ON(rq->hw_context != &ve->context); 950 951 if (rq_prio(rq) >= queue_prio(execlists)) { 952 if (!virtual_matches(ve, rq, engine)) { 953 spin_unlock(&ve->base.timeline.lock); 954 rb = rb_next(rb); 955 continue; 956 } 957 958 if (last && !can_merge_rq(last, rq)) { 959 spin_unlock(&ve->base.timeline.lock); 960 return; /* leave this rq for another engine */ 961 } 962 963 GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n", 964 engine->name, 965 rq->fence.context, 966 rq->fence.seqno, 967 i915_request_completed(rq) ? "!" : 968 i915_request_started(rq) ? "*" : 969 "", 970 yesno(engine != ve->siblings[0])); 971 972 ve->request = NULL; 973 ve->base.execlists.queue_priority_hint = INT_MIN; 974 rb_erase_cached(rb, &execlists->virtual); 975 RB_CLEAR_NODE(rb); 976 977 GEM_BUG_ON(!(rq->execution_mask & engine->mask)); 978 rq->engine = engine; 979 980 if (engine != ve->siblings[0]) { 981 u32 *regs = ve->context.lrc_reg_state; 982 unsigned int n; 983 984 GEM_BUG_ON(READ_ONCE(ve->context.active)); 985 virtual_update_register_offsets(regs, engine); 986 987 if (!list_empty(&ve->context.signals)) 988 virtual_xfer_breadcrumbs(ve, engine); 989 990 /* 991 * Move the bound engine to the top of the list 992 * for future execution. 
We then kick this 993 * tasklet first before checking others, so that 994 * we preferentially reuse this set of bound 995 * registers. 996 */ 997 for (n = 1; n < ve->num_siblings; n++) { 998 if (ve->siblings[n] == engine) { 999 swap(ve->siblings[n], 1000 ve->siblings[0]); 1001 break; 1002 } 1003 } 1004 1005 GEM_BUG_ON(ve->siblings[0] != engine); 1006 } 1007 1008 __i915_request_submit(rq); 1009 trace_i915_request_in(rq, port_index(port, execlists)); 1010 submit = true; 1011 last = rq; 1012 } 1013 1014 spin_unlock(&ve->base.timeline.lock); 1015 break; 1016 } 1017 1018 while ((rb = rb_first_cached(&execlists->queue))) { 1019 struct i915_priolist *p = to_priolist(rb); 1020 struct i915_request *rq, *rn; 1021 int i; 1022 1023 priolist_for_each_request_consume(rq, rn, p, i) { 1024 /* 1025 * Can we combine this request with the current port? 1026 * It has to be the same context/ringbuffer and not 1027 * have any exceptions (e.g. GVT saying never to 1028 * combine contexts). 1029 * 1030 * If we can combine the requests, we can execute both 1031 * by updating the RING_TAIL to point to the end of the 1032 * second request, and so we never need to tell the 1033 * hardware about the first. 1034 */ 1035 if (last && !can_merge_rq(last, rq)) { 1036 /* 1037 * If we are on the second port and cannot 1038 * combine this request with the last, then we 1039 * are done. 1040 */ 1041 if (port == last_port) 1042 goto done; 1043 1044 /* 1045 * We must not populate both ELSP[] with the 1046 * same LRCA, i.e. we must submit 2 different 1047 * contexts if we submit 2 ELSP. 1048 */ 1049 if (last->hw_context == rq->hw_context) 1050 goto done; 1051 1052 /* 1053 * If GVT overrides us we only ever submit 1054 * port[0], leaving port[1] empty. Note that we 1055 * also have to be careful that we don't queue 1056 * the same context (even though a different 1057 * request) to the second port. 1058 */ 1059 if (ctx_single_port_submission(last->hw_context) || 1060 ctx_single_port_submission(rq->hw_context)) 1061 goto done; 1062 1063 1064 if (submit) 1065 port_assign(port, last); 1066 port++; 1067 1068 GEM_BUG_ON(port_isset(port)); 1069 } 1070 1071 list_del_init(&rq->sched.link); 1072 1073 __i915_request_submit(rq); 1074 trace_i915_request_in(rq, port_index(port, execlists)); 1075 1076 last = rq; 1077 submit = true; 1078 } 1079 1080 rb_erase_cached(&p->node, &execlists->queue); 1081 i915_priolist_free(p); 1082 } 1083 1084 done: 1085 /* 1086 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. 1087 * 1088 * We choose the priority hint such that if we add a request of greater 1089 * priority than this, we kick the submission tasklet to decide on 1090 * the right order of submitting the requests to hardware. We must 1091 * also be prepared to reorder requests as they are in-flight on the 1092 * HW. We derive the priority hint then as the first "hole" in 1093 * the HW submission ports and if there are no available slots, 1094 * the priority of the lowest executing request, i.e. last. 1095 * 1096 * When we do receive a higher priority request ready to run from the 1097 * user, see queue_request(), the priority hint is bumped to that 1098 * request triggering preemption on the next dequeue (or subsequent 1099 * interrupt for secondary ports). 
1100 */ 1101 execlists->queue_priority_hint = queue_prio(execlists); 1102 1103 if (submit) { 1104 port_assign(port, last); 1105 execlists_submit_ports(engine); 1106 } 1107 1108 /* We must always keep the beast fed if we have work piled up */ 1109 GEM_BUG_ON(rb_first_cached(&execlists->queue) && 1110 !port_isset(execlists->port)); 1111 1112 /* Re-evaluate the executing context setup after each preemptive kick */ 1113 if (last) 1114 execlists_user_begin(execlists, execlists->port); 1115 1116 /* If the engine is now idle, so should be the flag; and vice versa. */ 1117 GEM_BUG_ON(execlists_is_active(&engine->execlists, 1118 EXECLISTS_ACTIVE_USER) == 1119 !port_isset(engine->execlists.port)); 1120 } 1121 1122 void 1123 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) 1124 { 1125 struct execlist_port *port = execlists->port; 1126 unsigned int num_ports = execlists_num_ports(execlists); 1127 1128 while (num_ports-- && port_isset(port)) { 1129 struct i915_request *rq = port_request(port); 1130 1131 GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n", 1132 rq->engine->name, 1133 (unsigned int)(port - execlists->port), 1134 rq->fence.context, rq->fence.seqno, 1135 hwsp_seqno(rq)); 1136 1137 GEM_BUG_ON(!execlists->active); 1138 execlists_context_schedule_out(rq, 1139 i915_request_completed(rq) ? 1140 INTEL_CONTEXT_SCHEDULE_OUT : 1141 INTEL_CONTEXT_SCHEDULE_PREEMPTED); 1142 1143 i915_request_put(rq); 1144 1145 memset(port, 0, sizeof(*port)); 1146 port++; 1147 } 1148 1149 execlists_clear_all_active(execlists); 1150 } 1151 1152 static inline void 1153 invalidate_csb_entries(const u32 *first, const u32 *last) 1154 { 1155 clflush((void *)first); 1156 clflush((void *)last); 1157 } 1158 1159 static inline bool 1160 reset_in_progress(const struct intel_engine_execlists *execlists) 1161 { 1162 return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); 1163 } 1164 1165 static void process_csb(struct intel_engine_cs *engine) 1166 { 1167 struct intel_engine_execlists * const execlists = &engine->execlists; 1168 struct execlist_port *port = execlists->port; 1169 const u32 * const buf = execlists->csb_status; 1170 const u8 num_entries = execlists->csb_size; 1171 u8 head, tail; 1172 1173 lockdep_assert_held(&engine->timeline.lock); 1174 1175 /* 1176 * Note that csb_write, csb_status may be either in HWSP or mmio. 1177 * When reading from the csb_write mmio register, we have to be 1178 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is 1179 * the low 4bits. As it happens we know the next 4bits are always 1180 * zero and so we can simply masked off the low u8 of the register 1181 * and treat it identically to reading from the HWSP (without having 1182 * to use explicit shifting and masking, and probably bifurcating 1183 * the code to handle the legacy mmio read). 1184 */ 1185 head = execlists->csb_head; 1186 tail = READ_ONCE(*execlists->csb_write); 1187 GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail); 1188 if (unlikely(head == tail)) 1189 return; 1190 1191 /* 1192 * Hopefully paired with a wmb() in HW! 1193 * 1194 * We must complete the read of the write pointer before any reads 1195 * from the CSB, so that we do not see stale values. Without an rmb 1196 * (lfence) the HW may speculatively perform the CSB[] reads *before* 1197 * we perform the READ_ONCE(*csb_write). 
1198 */ 1199 rmb(); 1200 1201 do { 1202 struct i915_request *rq; 1203 unsigned int status; 1204 unsigned int count; 1205 1206 if (++head == num_entries) 1207 head = 0; 1208 1209 /* 1210 * We are flying near dragons again. 1211 * 1212 * We hold a reference to the request in execlist_port[] 1213 * but no more than that. We are operating in softirq 1214 * context and so cannot hold any mutex or sleep. That 1215 * prevents us stopping the requests we are processing 1216 * in port[] from being retired simultaneously (the 1217 * breadcrumb will be complete before we see the 1218 * context-switch). As we only hold the reference to the 1219 * request, any pointer chasing underneath the request 1220 * is subject to a potential use-after-free. Thus we 1221 * store all of the bookkeeping within port[] as 1222 * required, and avoid using unguarded pointers beneath 1223 * request itself. The same applies to the atomic 1224 * status notifier. 1225 */ 1226 1227 GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n", 1228 engine->name, head, 1229 buf[2 * head + 0], buf[2 * head + 1], 1230 execlists->active); 1231 1232 status = buf[2 * head]; 1233 if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE | 1234 GEN8_CTX_STATUS_PREEMPTED)) 1235 execlists_set_active(execlists, 1236 EXECLISTS_ACTIVE_HWACK); 1237 if (status & GEN8_CTX_STATUS_ACTIVE_IDLE) 1238 execlists_clear_active(execlists, 1239 EXECLISTS_ACTIVE_HWACK); 1240 1241 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) 1242 continue; 1243 1244 /* We should never get a COMPLETED | IDLE_ACTIVE! */ 1245 GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); 1246 1247 if (status & GEN8_CTX_STATUS_COMPLETE && 1248 buf[2*head + 1] == execlists->preempt_complete_status) { 1249 GEM_TRACE("%s preempt-idle\n", engine->name); 1250 complete_preempt_context(execlists); 1251 continue; 1252 } 1253 1254 if (status & GEN8_CTX_STATUS_PREEMPTED && 1255 execlists_is_active(execlists, 1256 EXECLISTS_ACTIVE_PREEMPT)) 1257 continue; 1258 1259 GEM_BUG_ON(!execlists_is_active(execlists, 1260 EXECLISTS_ACTIVE_USER)); 1261 1262 rq = port_unpack(port, &count); 1263 GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", 1264 engine->name, 1265 port->context_id, count, 1266 rq ? rq->fence.context : 0, 1267 rq ? rq->fence.seqno : 0, 1268 rq ? hwsp_seqno(rq) : 0, 1269 rq ? rq_prio(rq) : 0); 1270 1271 /* Check the context/desc id for this event matches */ 1272 GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); 1273 1274 GEM_BUG_ON(count == 0); 1275 if (--count == 0) { 1276 /* 1277 * On the final event corresponding to the 1278 * submission of this context, we expect either 1279 * an element-switch event or a completion 1280 * event (and on completion, the active-idle 1281 * marker). No more preemptions, lite-restore 1282 * or otherwise. 1283 */ 1284 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED); 1285 GEM_BUG_ON(port_isset(&port[1]) && 1286 !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH)); 1287 GEM_BUG_ON(!port_isset(&port[1]) && 1288 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); 1289 1290 /* 1291 * We rely on the hardware being strongly 1292 * ordered, that the breadcrumb write is 1293 * coherent (visible from the CPU) before the 1294 * user interrupt and CSB is processed. 
1295 */ 1296 GEM_BUG_ON(!i915_request_completed(rq)); 1297 1298 execlists_context_schedule_out(rq, 1299 INTEL_CONTEXT_SCHEDULE_OUT); 1300 i915_request_put(rq); 1301 1302 GEM_TRACE("%s completed ctx=%d\n", 1303 engine->name, port->context_id); 1304 1305 port = execlists_port_complete(execlists, port); 1306 if (port_isset(port)) 1307 execlists_user_begin(execlists, port); 1308 else 1309 execlists_user_end(execlists); 1310 } else { 1311 port_set(port, port_pack(rq, count)); 1312 } 1313 } while (head != tail); 1314 1315 execlists->csb_head = head; 1316 1317 /* 1318 * Gen11 has proven to fail wrt global observation point between 1319 * entry and tail update, failing on the ordering and thus 1320 * we see an old entry in the context status buffer. 1321 * 1322 * Forcibly evict out entries for the next gpu csb update, 1323 * to increase the odds that we get a fresh entries with non 1324 * working hardware. The cost for doing so comes out mostly with 1325 * the wash as hardware, working or not, will need to do the 1326 * invalidation before. 1327 */ 1328 invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); 1329 } 1330 1331 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) 1332 { 1333 lockdep_assert_held(&engine->timeline.lock); 1334 1335 process_csb(engine); 1336 if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT)) 1337 execlists_dequeue(engine); 1338 } 1339 1340 /* 1341 * Check the unread Context Status Buffers and manage the submission of new 1342 * contexts to the ELSP accordingly. 1343 */ 1344 static void execlists_submission_tasklet(unsigned long data) 1345 { 1346 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 1347 unsigned long flags; 1348 1349 GEM_TRACE("%s awake?=%d, active=%x\n", 1350 engine->name, 1351 !!intel_wakeref_active(&engine->wakeref), 1352 engine->execlists.active); 1353 1354 spin_lock_irqsave(&engine->timeline.lock, flags); 1355 __execlists_submission_tasklet(engine); 1356 spin_unlock_irqrestore(&engine->timeline.lock, flags); 1357 } 1358 1359 static void queue_request(struct intel_engine_cs *engine, 1360 struct i915_sched_node *node, 1361 int prio) 1362 { 1363 list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); 1364 } 1365 1366 static void __submit_queue_imm(struct intel_engine_cs *engine) 1367 { 1368 struct intel_engine_execlists * const execlists = &engine->execlists; 1369 1370 if (reset_in_progress(execlists)) 1371 return; /* defer until we restart the engine following reset */ 1372 1373 if (execlists->tasklet.func == execlists_submission_tasklet) 1374 __execlists_submission_tasklet(engine); 1375 else 1376 tasklet_hi_schedule(&execlists->tasklet); 1377 } 1378 1379 static void submit_queue(struct intel_engine_cs *engine, int prio) 1380 { 1381 if (prio > engine->execlists.queue_priority_hint) { 1382 engine->execlists.queue_priority_hint = prio; 1383 __submit_queue_imm(engine); 1384 } 1385 } 1386 1387 static void execlists_submit_request(struct i915_request *request) 1388 { 1389 struct intel_engine_cs *engine = request->engine; 1390 unsigned long flags; 1391 1392 /* Will be called from irq-context when using foreign fences. 
*/ 1393 spin_lock_irqsave(&engine->timeline.lock, flags); 1394 1395 queue_request(engine, &request->sched, rq_prio(request)); 1396 1397 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 1398 GEM_BUG_ON(list_empty(&request->sched.link)); 1399 1400 submit_queue(engine, rq_prio(request)); 1401 1402 spin_unlock_irqrestore(&engine->timeline.lock, flags); 1403 } 1404 1405 static void __execlists_context_fini(struct intel_context *ce) 1406 { 1407 intel_ring_put(ce->ring); 1408 1409 GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); 1410 i915_gem_object_put(ce->state->obj); 1411 } 1412 1413 static void execlists_context_destroy(struct kref *kref) 1414 { 1415 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 1416 1417 GEM_BUG_ON(intel_context_is_pinned(ce)); 1418 1419 if (ce->state) 1420 __execlists_context_fini(ce); 1421 1422 intel_context_free(ce); 1423 } 1424 1425 static int __context_pin(struct i915_vma *vma) 1426 { 1427 unsigned int flags; 1428 int err; 1429 1430 flags = PIN_GLOBAL | PIN_HIGH; 1431 flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); 1432 1433 err = i915_vma_pin(vma, 0, 0, flags); 1434 if (err) 1435 return err; 1436 1437 vma->obj->pin_global++; 1438 vma->obj->mm.dirty = true; 1439 1440 return 0; 1441 } 1442 1443 static void __context_unpin(struct i915_vma *vma) 1444 { 1445 vma->obj->pin_global--; 1446 __i915_vma_unpin(vma); 1447 } 1448 1449 static void execlists_context_unpin(struct intel_context *ce) 1450 { 1451 struct intel_engine_cs *engine; 1452 1453 /* 1454 * The tasklet may still be using a pointer to our state, via an 1455 * old request. However, since we know we only unpin the context 1456 * on retirement of the following request, we know that the last 1457 * request referencing us will have had a completion CS interrupt. 1458 * If we see that it is still active, it means that the tasklet hasn't 1459 * had the chance to run yet; let it run before we teardown the 1460 * reference it may use. 
1461 */ 1462 engine = READ_ONCE(ce->active); 1463 if (unlikely(engine)) { 1464 unsigned long flags; 1465 1466 spin_lock_irqsave(&engine->timeline.lock, flags); 1467 process_csb(engine); 1468 spin_unlock_irqrestore(&engine->timeline.lock, flags); 1469 1470 GEM_BUG_ON(READ_ONCE(ce->active)); 1471 } 1472 1473 i915_gem_context_unpin_hw_id(ce->gem_context); 1474 1475 intel_ring_unpin(ce->ring); 1476 1477 i915_gem_object_unpin_map(ce->state->obj); 1478 __context_unpin(ce->state); 1479 } 1480 1481 static void 1482 __execlists_update_reg_state(struct intel_context *ce, 1483 struct intel_engine_cs *engine) 1484 { 1485 struct intel_ring *ring = ce->ring; 1486 u32 *regs = ce->lrc_reg_state; 1487 1488 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); 1489 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); 1490 1491 regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma); 1492 regs[CTX_RING_HEAD + 1] = ring->head; 1493 regs[CTX_RING_TAIL + 1] = ring->tail; 1494 1495 /* RPCS */ 1496 if (engine->class == RENDER_CLASS) 1497 regs[CTX_R_PWR_CLK_STATE + 1] = 1498 intel_sseu_make_rpcs(engine->i915, &ce->sseu); 1499 } 1500 1501 static int 1502 __execlists_context_pin(struct intel_context *ce, 1503 struct intel_engine_cs *engine) 1504 { 1505 void *vaddr; 1506 int ret; 1507 1508 GEM_BUG_ON(!ce->gem_context->ppgtt); 1509 1510 ret = execlists_context_deferred_alloc(ce, engine); 1511 if (ret) 1512 goto err; 1513 GEM_BUG_ON(!ce->state); 1514 1515 ret = __context_pin(ce->state); 1516 if (ret) 1517 goto err; 1518 1519 vaddr = i915_gem_object_pin_map(ce->state->obj, 1520 i915_coherent_map_type(engine->i915) | 1521 I915_MAP_OVERRIDE); 1522 if (IS_ERR(vaddr)) { 1523 ret = PTR_ERR(vaddr); 1524 goto unpin_vma; 1525 } 1526 1527 ret = intel_ring_pin(ce->ring); 1528 if (ret) 1529 goto unpin_map; 1530 1531 ret = i915_gem_context_pin_hw_id(ce->gem_context); 1532 if (ret) 1533 goto unpin_ring; 1534 1535 ce->lrc_desc = lrc_descriptor(ce, engine); 1536 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 1537 __execlists_update_reg_state(ce, engine); 1538 1539 return 0; 1540 1541 unpin_ring: 1542 intel_ring_unpin(ce->ring); 1543 unpin_map: 1544 i915_gem_object_unpin_map(ce->state->obj); 1545 unpin_vma: 1546 __context_unpin(ce->state); 1547 err: 1548 return ret; 1549 } 1550 1551 static int execlists_context_pin(struct intel_context *ce) 1552 { 1553 return __execlists_context_pin(ce, ce->engine); 1554 } 1555 1556 static void execlists_context_reset(struct intel_context *ce) 1557 { 1558 /* 1559 * Because we emit WA_TAIL_DWORDS there may be a disparity 1560 * between our bookkeeping in ce->ring->head and ce->ring->tail and 1561 * that stored in context. As we only write new commands from 1562 * ce->ring->tail onwards, everything before that is junk. If the GPU 1563 * starts reading from its RING_HEAD from the context, it may try to 1564 * execute that junk and die. 1565 * 1566 * The contexts that are stilled pinned on resume belong to the 1567 * kernel, and are local to each engine. All other contexts will 1568 * have their head/tail sanitized upon pinning before use, so they 1569 * will never see garbage, 1570 * 1571 * So to avoid that we reset the context images upon resume. For 1572 * simplicity, we just zero everything out. 
1573 */ 1574 intel_ring_reset(ce->ring, 0); 1575 __execlists_update_reg_state(ce, ce->engine); 1576 } 1577 1578 static const struct intel_context_ops execlists_context_ops = { 1579 .pin = execlists_context_pin, 1580 .unpin = execlists_context_unpin, 1581 1582 .enter = intel_context_enter_engine, 1583 .exit = intel_context_exit_engine, 1584 1585 .reset = execlists_context_reset, 1586 .destroy = execlists_context_destroy, 1587 }; 1588 1589 static int gen8_emit_init_breadcrumb(struct i915_request *rq) 1590 { 1591 u32 *cs; 1592 1593 GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb); 1594 1595 cs = intel_ring_begin(rq, 6); 1596 if (IS_ERR(cs)) 1597 return PTR_ERR(cs); 1598 1599 /* 1600 * Check if we have been preempted before we even get started. 1601 * 1602 * After this point i915_request_started() reports true, even if 1603 * we get preempted and so are no longer running. 1604 */ 1605 *cs++ = MI_ARB_CHECK; 1606 *cs++ = MI_NOOP; 1607 1608 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1609 *cs++ = rq->timeline->hwsp_offset; 1610 *cs++ = 0; 1611 *cs++ = rq->fence.seqno - 1; 1612 1613 intel_ring_advance(rq, cs); 1614 1615 /* Record the updated position of the request's payload */ 1616 rq->infix = intel_ring_offset(rq, cs); 1617 1618 return 0; 1619 } 1620 1621 static int emit_pdps(struct i915_request *rq) 1622 { 1623 const struct intel_engine_cs * const engine = rq->engine; 1624 struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt; 1625 int err, i; 1626 u32 *cs; 1627 1628 GEM_BUG_ON(intel_vgpu_active(rq->i915)); 1629 1630 /* 1631 * Beware ye of the dragons, this sequence is magic! 1632 * 1633 * Small changes to this sequence can cause anything from 1634 * GPU hangs to forcewake errors and machine lockups! 1635 */ 1636 1637 /* Flush any residual operations from the context load */ 1638 err = engine->emit_flush(rq, EMIT_FLUSH); 1639 if (err) 1640 return err; 1641 1642 /* Magic required to prevent forcewake errors! */ 1643 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1644 if (err) 1645 return err; 1646 1647 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1648 if (IS_ERR(cs)) 1649 return PTR_ERR(cs); 1650 1651 /* Ensure the LRI have landed before we invalidate & continue */ 1652 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1653 for (i = GEN8_3LVL_PDPES; i--; ) { 1654 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1655 u32 base = engine->mmio_base; 1656 1657 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1658 *cs++ = upper_32_bits(pd_daddr); 1659 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1660 *cs++ = lower_32_bits(pd_daddr); 1661 } 1662 *cs++ = MI_NOOP; 1663 1664 intel_ring_advance(rq, cs); 1665 1666 /* Be doubly sure the LRI have landed before proceeding */ 1667 err = engine->emit_flush(rq, EMIT_FLUSH); 1668 if (err) 1669 return err; 1670 1671 /* Re-invalidate the TLB for luck */ 1672 return engine->emit_flush(rq, EMIT_INVALIDATE); 1673 } 1674 1675 static int execlists_request_alloc(struct i915_request *request) 1676 { 1677 int ret; 1678 1679 GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); 1680 1681 /* 1682 * Flush enough space to reduce the likelihood of waiting after 1683 * we start building the request - in which case we will just 1684 * have to repeat work. 
1685 */
1686 request->reserved_space += EXECLISTS_REQUEST_SIZE;
1687
1688 /*
1689 * Note that after this point, we have committed to using
1690 * this request as it is being used to both track the
1691 * state of engine initialisation and liveness of the
1692 * golden renderstate above. Think twice before you try
1693 * to cancel/unwind this request now.
1694 */
1695
1696 /* Unconditionally invalidate GPU caches and TLBs. */
1697 if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
1698 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1699 else
1700 ret = emit_pdps(request);
1701 if (ret)
1702 return ret;
1703
1704 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
1705 return 0;
1706 }
1707
1708 /*
1709 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1710 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1711 * but there is a slight complication as this is applied in WA batch where the
1712 * values are only initialized once so we cannot take register value at the
1713 * beginning and reuse it further; hence we save its value to memory, upload a
1714 * constant value with bit21 set and then we restore it back with the saved value.
1715 * To simplify the WA, a constant value is formed by using the default value
1716 * of this register. This shouldn't be a problem because we are only modifying
1717 * it for a short period and this batch is non-preemptible. We can of course
1718 * use additional instructions that read the actual value of the register
1719 * at that time and set our bit of interest but it makes the WA complicated.
1720 *
1721 * This WA is also required for Gen9 so extracting as a function avoids
1722 * code duplication.
1723 */
1724 static u32 *
1725 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
1726 {
1727 /* NB no one else is allowed to scribble over scratch + 256! */
1728 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1729 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1730 *batch++ = i915_scratch_offset(engine->i915) + 256;
1731 *batch++ = 0;
1732
1733 *batch++ = MI_LOAD_REGISTER_IMM(1);
1734 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1735 *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
1736
1737 batch = gen8_emit_pipe_control(batch,
1738 PIPE_CONTROL_CS_STALL |
1739 PIPE_CONTROL_DC_FLUSH_ENABLE,
1740 0);
1741
1742 *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1743 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1744 *batch++ = i915_scratch_offset(engine->i915) + 256;
1745 *batch++ = 0;
1746
1747 return batch;
1748 }
1749
1750 /*
1751 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1752 * initialized at the beginning and shared across all contexts but this field
1753 * helps us to have multiple batches at different offsets and select them based
1754 * on some criteria. At the moment this batch always starts at the beginning of the page
1755 * and at this point we don't have multiple wa_ctx batch buffers.
1756 *
1757 * The number of WAs applied is not known at the beginning; we use this field
1758 * to return the number of DWORDS written.
1759 *
1760 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1761 * so it adds NOOPs as padding to make it cacheline aligned.
1762 * MI_BATCH_BUFFER_END will be added to the per-ctx batch and both of them together
1763 * make a complete batch buffer.
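 *
 * The offset and size recorded here for each batch are later consumed by
 * execlists_init_reg_state(), which packs the ggtt offset of the indirect_ctx
 * batch together with its length in cachelines into CTX_RCS_INDIRECT_CTX and
 * points CTX_BB_PER_CTX_PTR at the per-ctx batch.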
1764 */ 1765 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 1766 { 1767 /* WaDisableCtxRestoreArbitration:bdw,chv */ 1768 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 1769 1770 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1771 if (IS_BROADWELL(engine->i915)) 1772 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1773 1774 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 1775 /* Actual scratch location is at 128 bytes offset */ 1776 batch = gen8_emit_pipe_control(batch, 1777 PIPE_CONTROL_FLUSH_L3 | 1778 PIPE_CONTROL_GLOBAL_GTT_IVB | 1779 PIPE_CONTROL_CS_STALL | 1780 PIPE_CONTROL_QW_WRITE, 1781 i915_scratch_offset(engine->i915) + 1782 2 * CACHELINE_BYTES); 1783 1784 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 1785 1786 /* Pad to end of cacheline */ 1787 while ((unsigned long)batch % CACHELINE_BYTES) 1788 *batch++ = MI_NOOP; 1789 1790 /* 1791 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 1792 * execution depends on the length specified in terms of cache lines 1793 * in the register CTX_RCS_INDIRECT_CTX 1794 */ 1795 1796 return batch; 1797 } 1798 1799 struct lri { 1800 i915_reg_t reg; 1801 u32 value; 1802 }; 1803 1804 static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count) 1805 { 1806 GEM_BUG_ON(!count || count > 63); 1807 1808 *batch++ = MI_LOAD_REGISTER_IMM(count); 1809 do { 1810 *batch++ = i915_mmio_reg_offset(lri->reg); 1811 *batch++ = lri->value; 1812 } while (lri++, --count); 1813 *batch++ = MI_NOOP; 1814 1815 return batch; 1816 } 1817 1818 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 1819 { 1820 static const struct lri lri[] = { 1821 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 1822 { 1823 COMMON_SLICE_CHICKEN2, 1824 __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE, 1825 0), 1826 }, 1827 1828 /* BSpec: 11391 */ 1829 { 1830 FF_SLICE_CHICKEN, 1831 __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX, 1832 FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX), 1833 }, 1834 1835 /* BSpec: 11299 */ 1836 { 1837 _3D_CHICKEN3, 1838 __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX, 1839 _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX), 1840 } 1841 }; 1842 1843 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 1844 1845 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 1846 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1847 1848 batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); 1849 1850 /* WaMediaPoolStateCmdInWABB:bxt,glk */ 1851 if (HAS_POOLED_EU(engine->i915)) { 1852 /* 1853 * EU pool configuration is setup along with golden context 1854 * during context initialization. This value depends on 1855 * device type (2x6 or 3x6) and needs to be updated based 1856 * on which subslice is disabled especially for 2x6 1857 * devices, however it is safe to load default 1858 * configuration of 3x6 device instead of masking off 1859 * corresponding bits because HW ignores bits of a disabled 1860 * subslice and drops down to appropriate config. Please 1861 * see render_state_setup() in i915_gem_render_state.c for 1862 * possible configurations, to avoid duplication they are 1863 * not shown here again. 
1864 */ 1865 *batch++ = GEN9_MEDIA_POOL_STATE; 1866 *batch++ = GEN9_MEDIA_POOL_ENABLE; 1867 *batch++ = 0x00777000; 1868 *batch++ = 0; 1869 *batch++ = 0; 1870 *batch++ = 0; 1871 } 1872 1873 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 1874 1875 /* Pad to end of cacheline */ 1876 while ((unsigned long)batch % CACHELINE_BYTES) 1877 *batch++ = MI_NOOP; 1878 1879 return batch; 1880 } 1881 1882 static u32 * 1883 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 1884 { 1885 int i; 1886 1887 /* 1888 * WaPipeControlBefore3DStateSamplePattern: cnl 1889 * 1890 * Ensure the engine is idle prior to programming a 1891 * 3DSTATE_SAMPLE_PATTERN during a context restore. 1892 */ 1893 batch = gen8_emit_pipe_control(batch, 1894 PIPE_CONTROL_CS_STALL, 1895 0); 1896 /* 1897 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for 1898 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in 1899 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is 1900 * confusing. Since gen8_emit_pipe_control() already advances the 1901 * batch by 6 dwords, we advance the other 10 here, completing a 1902 * cacheline. It's not clear if the workaround requires this padding 1903 * before other commands, or if it's just the regular padding we would 1904 * already have for the workaround bb, so leave it here for now. 1905 */ 1906 for (i = 0; i < 10; i++) 1907 *batch++ = MI_NOOP; 1908 1909 /* Pad to end of cacheline */ 1910 while ((unsigned long)batch % CACHELINE_BYTES) 1911 *batch++ = MI_NOOP; 1912 1913 return batch; 1914 } 1915 1916 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) 1917 1918 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) 1919 { 1920 struct drm_i915_gem_object *obj; 1921 struct i915_vma *vma; 1922 int err; 1923 1924 obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); 1925 if (IS_ERR(obj)) 1926 return PTR_ERR(obj); 1927 1928 vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); 1929 if (IS_ERR(vma)) { 1930 err = PTR_ERR(vma); 1931 goto err; 1932 } 1933 1934 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); 1935 if (err) 1936 goto err; 1937 1938 engine->wa_ctx.vma = vma; 1939 return 0; 1940 1941 err: 1942 i915_gem_object_put(obj); 1943 return err; 1944 } 1945 1946 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) 1947 { 1948 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); 1949 } 1950 1951 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); 1952 1953 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 1954 { 1955 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 1956 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, 1957 &wa_ctx->per_ctx }; 1958 wa_bb_func_t wa_bb_fn[2]; 1959 struct page *page; 1960 void *batch, *batch_ptr; 1961 unsigned int i; 1962 int ret; 1963 1964 if (engine->class != RENDER_CLASS) 1965 return 0; 1966 1967 switch (INTEL_GEN(engine->i915)) { 1968 case 11: 1969 return 0; 1970 case 10: 1971 wa_bb_fn[0] = gen10_init_indirectctx_bb; 1972 wa_bb_fn[1] = NULL; 1973 break; 1974 case 9: 1975 wa_bb_fn[0] = gen9_init_indirectctx_bb; 1976 wa_bb_fn[1] = NULL; 1977 break; 1978 case 8: 1979 wa_bb_fn[0] = gen8_init_indirectctx_bb; 1980 wa_bb_fn[1] = NULL; 1981 break; 1982 default: 1983 MISSING_CASE(INTEL_GEN(engine->i915)); 1984 return 0; 1985 } 1986 1987 ret = lrc_setup_wa_ctx(engine); 1988 if (ret) { 1989 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); 1990 return ret; 1991 } 1992 1993 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); 1994 
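/*
 * CPU-map the single WA page: the wa_bb_fn() callbacks below write their
 * MI commands directly into it, with 'batch' marking the start of the page
 * and 'batch_ptr' advancing as each workaround batch is emitted.
 */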
batch = batch_ptr = kmap_atomic(page); 1995 1996 /* 1997 * Emit the two workaround batch buffers, recording the offset from the 1998 * start of the workaround batch buffer object for each and their 1999 * respective sizes. 2000 */ 2001 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { 2002 wa_bb[i]->offset = batch_ptr - batch; 2003 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, 2004 CACHELINE_BYTES))) { 2005 ret = -EINVAL; 2006 break; 2007 } 2008 if (wa_bb_fn[i]) 2009 batch_ptr = wa_bb_fn[i](engine, batch_ptr); 2010 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); 2011 } 2012 2013 BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); 2014 2015 kunmap_atomic(batch); 2016 if (ret) 2017 lrc_destroy_wa_ctx(engine); 2018 2019 return ret; 2020 } 2021 2022 static void enable_execlists(struct intel_engine_cs *engine) 2023 { 2024 struct drm_i915_private *dev_priv = engine->i915; 2025 2026 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ 2027 2028 if (INTEL_GEN(dev_priv) >= 11) 2029 I915_WRITE(RING_MODE_GEN7(engine), 2030 _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); 2031 else 2032 I915_WRITE(RING_MODE_GEN7(engine), 2033 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); 2034 2035 I915_WRITE(RING_MI_MODE(engine->mmio_base), 2036 _MASKED_BIT_DISABLE(STOP_RING)); 2037 2038 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 2039 i915_ggtt_offset(engine->status_page.vma)); 2040 POSTING_READ(RING_HWS_PGA(engine->mmio_base)); 2041 } 2042 2043 static bool unexpected_starting_state(struct intel_engine_cs *engine) 2044 { 2045 struct drm_i915_private *dev_priv = engine->i915; 2046 bool unexpected = false; 2047 2048 if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) { 2049 DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n"); 2050 unexpected = true; 2051 } 2052 2053 return unexpected; 2054 } 2055 2056 static int execlists_resume(struct intel_engine_cs *engine) 2057 { 2058 intel_engine_apply_workarounds(engine); 2059 intel_engine_apply_whitelist(engine); 2060 2061 intel_mocs_init_engine(engine); 2062 2063 intel_engine_reset_breadcrumbs(engine); 2064 2065 if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { 2066 struct drm_printer p = drm_debug_printer(__func__); 2067 2068 intel_engine_dump(engine, &p, NULL); 2069 } 2070 2071 enable_execlists(engine); 2072 2073 return 0; 2074 } 2075 2076 static void execlists_reset_prepare(struct intel_engine_cs *engine) 2077 { 2078 struct intel_engine_execlists * const execlists = &engine->execlists; 2079 unsigned long flags; 2080 2081 GEM_TRACE("%s: depth<-%d\n", engine->name, 2082 atomic_read(&execlists->tasklet.count)); 2083 2084 /* 2085 * Prevent request submission to the hardware until we have 2086 * completed the reset in i915_gem_reset_finish(). If a request 2087 * is completed by one engine, it may then queue a request 2088 * to a second via its execlists->tasklet *just* as we are 2089 * calling engine->resume() and also writing the ELSP. 2090 * Turning off the execlists->tasklet until the reset is over 2091 * prevents the race. 2092 */ 2093 __tasklet_disable_sync_once(&execlists->tasklet); 2094 GEM_BUG_ON(!reset_in_progress(execlists)); 2095 2096 intel_engine_stop_cs(engine); 2097 2098 /* And flush any current direct submission. 
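 * Taking and then releasing the engine timeline lock below acts as a
 * barrier: anyone performing a direct submission at this moment holds the
 * lock, so once we have cycled it that submission has finished.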
*/ 2099 spin_lock_irqsave(&engine->timeline.lock, flags); 2100 spin_unlock_irqrestore(&engine->timeline.lock, flags); 2101 } 2102 2103 static bool lrc_regs_ok(const struct i915_request *rq) 2104 { 2105 const struct intel_ring *ring = rq->ring; 2106 const u32 *regs = rq->hw_context->lrc_reg_state; 2107 2108 /* Quick spot check for the common signs of context corruption */ 2109 2110 if (regs[CTX_RING_BUFFER_CONTROL + 1] != 2111 (RING_CTL_SIZE(ring->size) | RING_VALID)) 2112 return false; 2113 2114 if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma)) 2115 return false; 2116 2117 return true; 2118 } 2119 2120 static void reset_csb_pointers(struct intel_engine_execlists *execlists) 2121 { 2122 const unsigned int reset_value = execlists->csb_size - 1; 2123 2124 /* 2125 * After a reset, the HW starts writing into CSB entry [0]. We 2126 * therefore have to set our HEAD pointer back one entry so that 2127 * the *first* entry we check is entry 0. To complicate this further, 2128 * as we don't wait for the first interrupt after reset, we have to 2129 * fake the HW write to point back to the last entry so that our 2130 * inline comparison of our cached head position against the last HW 2131 * write works even before the first interrupt. 2132 */ 2133 execlists->csb_head = reset_value; 2134 WRITE_ONCE(*execlists->csb_write, reset_value); 2135 wmb(); /* Make sure this is visible to HW (paranoia?) */ 2136 2137 invalidate_csb_entries(&execlists->csb_status[0], 2138 &execlists->csb_status[reset_value]); 2139 } 2140 2141 static struct i915_request *active_request(struct i915_request *rq) 2142 { 2143 const struct list_head * const list = &rq->engine->timeline.requests; 2144 const struct intel_context * const context = rq->hw_context; 2145 struct i915_request *active = NULL; 2146 2147 list_for_each_entry_from_reverse(rq, list, link) { 2148 if (i915_request_completed(rq)) 2149 break; 2150 2151 if (rq->hw_context != context) 2152 break; 2153 2154 active = rq; 2155 } 2156 2157 return active; 2158 } 2159 2160 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) 2161 { 2162 struct intel_engine_execlists * const execlists = &engine->execlists; 2163 struct intel_context *ce; 2164 struct i915_request *rq; 2165 u32 *regs; 2166 2167 process_csb(engine); /* drain preemption events */ 2168 2169 /* Following the reset, we need to reload the CSB read/write pointers */ 2170 reset_csb_pointers(&engine->execlists); 2171 2172 /* 2173 * Save the currently executing context, even if we completed 2174 * its request, it was still running at the time of the 2175 * reset and will have been clobbered. 2176 */ 2177 if (!port_isset(execlists->port)) 2178 goto out_clear; 2179 2180 rq = port_request(execlists->port); 2181 ce = rq->hw_context; 2182 2183 /* 2184 * Catch up with any missed context-switch interrupts. 2185 * 2186 * Ideally we would just read the remaining CSB entries now that we 2187 * know the gpu is idle. However, the CSB registers are sometimes^W 2188 * often trashed across a GPU reset! Instead we have to rely on 2189 * guessing the missed context-switch events by looking at what 2190 * requests were completed. 2191 */ 2192 execlists_cancel_port_requests(execlists); 2193 2194 rq = active_request(rq); 2195 if (!rq) 2196 goto out_replay; 2197 2198 /* 2199 * If this request hasn't started yet, e.g. it is waiting on a 2200 * semaphore, we need to avoid skipping the request or else we 2201 * break the signaling chain. 
However, if the context is corrupt 2202 * the request will not restart and we will be stuck with a wedged 2203 * device. It is quite often the case that if we issue a reset 2204 * while the GPU is loading the context image, that the context 2205 * image becomes corrupt. 2206 * 2207 * Otherwise, if we have not started yet, the request should replay 2208 * perfectly and we do not need to flag the result as being erroneous. 2209 */ 2210 if (!i915_request_started(rq) && lrc_regs_ok(rq)) 2211 goto out_replay; 2212 2213 /* 2214 * If the request was innocent, we leave the request in the ELSP 2215 * and will try to replay it on restarting. The context image may 2216 * have been corrupted by the reset, in which case we may have 2217 * to service a new GPU hang, but more likely we can continue on 2218 * without impact. 2219 * 2220 * If the request was guilty, we presume the context is corrupt 2221 * and have to at least restore the RING register in the context 2222 * image back to the expected values to skip over the guilty request. 2223 */ 2224 i915_reset_request(rq, stalled); 2225 if (!stalled && lrc_regs_ok(rq)) 2226 goto out_replay; 2227 2228 /* 2229 * We want a simple context + ring to execute the breadcrumb update. 2230 * We cannot rely on the context being intact across the GPU hang, 2231 * so clear it and rebuild just what we need for the breadcrumb. 2232 * All pending requests for this context will be zapped, and any 2233 * future request will be after userspace has had the opportunity 2234 * to recreate its own state. 2235 */ 2236 regs = ce->lrc_reg_state; 2237 if (engine->pinned_default_state) { 2238 memcpy(regs, /* skip restoring the vanilla PPHWSP */ 2239 engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, 2240 engine->context_size - PAGE_SIZE); 2241 } 2242 execlists_init_reg_state(regs, ce, engine, ce->ring); 2243 2244 out_replay: 2245 /* Rerun the request; its payload has been neutered (if guilty). */ 2246 ce->ring->head = 2247 rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail; 2248 intel_ring_update_space(ce->ring); 2249 __execlists_update_reg_state(ce, engine); 2250 2251 /* Push back any incomplete requests for replay after the reset. */ 2252 __unwind_incomplete_requests(engine); 2253 2254 out_clear: 2255 execlists_clear_all_active(execlists); 2256 } 2257 2258 static void execlists_reset(struct intel_engine_cs *engine, bool stalled) 2259 { 2260 unsigned long flags; 2261 2262 GEM_TRACE("%s\n", engine->name); 2263 2264 spin_lock_irqsave(&engine->timeline.lock, flags); 2265 2266 __execlists_reset(engine, stalled); 2267 2268 spin_unlock_irqrestore(&engine->timeline.lock, flags); 2269 } 2270 2271 static void nop_submission_tasklet(unsigned long data) 2272 { 2273 /* The driver is wedged; don't process any more events. */ 2274 } 2275 2276 static void execlists_cancel_requests(struct intel_engine_cs *engine) 2277 { 2278 struct intel_engine_execlists * const execlists = &engine->execlists; 2279 struct i915_request *rq, *rn; 2280 struct rb_node *rb; 2281 unsigned long flags; 2282 2283 GEM_TRACE("%s\n", engine->name); 2284 2285 /* 2286 * Before we call engine->cancel_requests(), we should have exclusive 2287 * access to the submission state. This is arranged for us by the 2288 * caller disabling the interrupt generation, the tasklet and other 2289 * threads that may then access the same state, giving us a free hand 2290 * to reset state. 
However, we still need to let lockdep be aware that 2291 * we know this state may be accessed in hardirq context, so we 2292 * disable the irq around this manipulation and we want to keep 2293 * the spinlock focused on its duties and not accidentally conflate 2294 * coverage to the submission's irq state. (Similarly, although we 2295 * shouldn't need to disable irq around the manipulation of the 2296 * submission's irq state, we also wish to remind ourselves that 2297 * it is irq state.) 2298 */ 2299 spin_lock_irqsave(&engine->timeline.lock, flags); 2300 2301 __execlists_reset(engine, true); 2302 2303 /* Mark all executing requests as skipped. */ 2304 list_for_each_entry(rq, &engine->timeline.requests, link) { 2305 if (!i915_request_signaled(rq)) 2306 dma_fence_set_error(&rq->fence, -EIO); 2307 2308 i915_request_mark_complete(rq); 2309 } 2310 2311 /* Flush the queued requests to the timeline list (for retiring). */ 2312 while ((rb = rb_first_cached(&execlists->queue))) { 2313 struct i915_priolist *p = to_priolist(rb); 2314 int i; 2315 2316 priolist_for_each_request_consume(rq, rn, p, i) { 2317 list_del_init(&rq->sched.link); 2318 __i915_request_submit(rq); 2319 dma_fence_set_error(&rq->fence, -EIO); 2320 i915_request_mark_complete(rq); 2321 } 2322 2323 rb_erase_cached(&p->node, &execlists->queue); 2324 i915_priolist_free(p); 2325 } 2326 2327 /* Cancel all attached virtual engines */ 2328 while ((rb = rb_first_cached(&execlists->virtual))) { 2329 struct virtual_engine *ve = 2330 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 2331 2332 rb_erase_cached(rb, &execlists->virtual); 2333 RB_CLEAR_NODE(rb); 2334 2335 spin_lock(&ve->base.timeline.lock); 2336 if (ve->request) { 2337 ve->request->engine = engine; 2338 __i915_request_submit(ve->request); 2339 dma_fence_set_error(&ve->request->fence, -EIO); 2340 i915_request_mark_complete(ve->request); 2341 ve->base.execlists.queue_priority_hint = INT_MIN; 2342 ve->request = NULL; 2343 } 2344 spin_unlock(&ve->base.timeline.lock); 2345 } 2346 2347 /* Remaining _unready_ requests will be nop'ed when submitted */ 2348 2349 execlists->queue_priority_hint = INT_MIN; 2350 execlists->queue = RB_ROOT_CACHED; 2351 GEM_BUG_ON(port_isset(execlists->port)); 2352 2353 GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); 2354 execlists->tasklet.func = nop_submission_tasklet; 2355 2356 spin_unlock_irqrestore(&engine->timeline.lock, flags); 2357 } 2358 2359 static void execlists_reset_finish(struct intel_engine_cs *engine) 2360 { 2361 struct intel_engine_execlists * const execlists = &engine->execlists; 2362 2363 /* 2364 * After a GPU reset, we may have requests to replay. Do so now while 2365 * we still have the forcewake to be sure that the GPU is not allowed 2366 * to sleep before we restart and reload a context. 2367 */ 2368 GEM_BUG_ON(!reset_in_progress(execlists)); 2369 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) 2370 execlists->tasklet.func(execlists->tasklet.data); 2371 2372 if (__tasklet_enable(&execlists->tasklet)) 2373 /* And kick in case we missed a new request submission. 
*/
2374 tasklet_hi_schedule(&execlists->tasklet);
2375 GEM_TRACE("%s: depth->%d\n", engine->name,
2376 atomic_read(&execlists->tasklet.count));
2377 }
2378
2379 static int gen8_emit_bb_start(struct i915_request *rq,
2380 u64 offset, u32 len,
2381 const unsigned int flags)
2382 {
2383 u32 *cs;
2384
2385 cs = intel_ring_begin(rq, 4);
2386 if (IS_ERR(cs))
2387 return PTR_ERR(cs);
2388
2389 /*
2390 * WaDisableCtxRestoreArbitration:bdw,chv
2391 *
2392 * We don't need to perform MI_ARB_ENABLE as often as we do (in
2393 * particular all the gen that do not need the w/a at all!), if we
2394 * took care to make sure that on every switch into this context
2395 * (both ordinary and for preemption) arbitration was enabled,
2396 * we would be fine. However, for gen8 there is another w/a that
2397 * requires us to not preempt inside GPGPU execution, so we keep
2398 * arbitration disabled for gen8 batches. Arbitration will be
2399 * re-enabled before we close the request
2400 * (engine->emit_fini_breadcrumb).
2401 */
2402 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
2403
2404 /* FIXME(BDW+): Address space and security selectors. */
2405 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
2406 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
2407 *cs++ = lower_32_bits(offset);
2408 *cs++ = upper_32_bits(offset);
2409
2410 intel_ring_advance(rq, cs);
2411
2412 return 0;
2413 }
2414
2415 static int gen9_emit_bb_start(struct i915_request *rq,
2416 u64 offset, u32 len,
2417 const unsigned int flags)
2418 {
2419 u32 *cs;
2420
2421 cs = intel_ring_begin(rq, 6);
2422 if (IS_ERR(cs))
2423 return PTR_ERR(cs);
2424
2425 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2426
2427 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
2428 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
2429 *cs++ = lower_32_bits(offset);
2430 *cs++ = upper_32_bits(offset);
2431
2432 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
2433 *cs++ = MI_NOOP;
2434
2435 intel_ring_advance(rq, cs);
2436
2437 return 0;
2438 }
2439
2440 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
2441 {
2442 ENGINE_WRITE(engine, RING_IMR,
2443 ~(engine->irq_enable_mask | engine->irq_keep_mask));
2444 ENGINE_POSTING_READ(engine, RING_IMR);
2445 }
2446
2447 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
2448 {
2449 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
2450 }
2451
2452 static int gen8_emit_flush(struct i915_request *request, u32 mode)
2453 {
2454 u32 cmd, *cs;
2455
2456 cs = intel_ring_begin(request, 4);
2457 if (IS_ERR(cs))
2458 return PTR_ERR(cs);
2459
2460 cmd = MI_FLUSH_DW + 1;
2461
2462 /* We always require a command barrier so that subsequent
2463 * commands, such as breadcrumb interrupts, are strictly ordered
2464 * wrt the contents of the write cache being flushed to memory
2465 * (and thus being coherent from the CPU).
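 * Hence the MI_FLUSH_DW below is emitted with a post-sync dword write into
 * the HWSP scratch slot (MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW).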
2466 */ 2467 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 2468 2469 if (mode & EMIT_INVALIDATE) { 2470 cmd |= MI_INVALIDATE_TLB; 2471 if (request->engine->class == VIDEO_DECODE_CLASS) 2472 cmd |= MI_INVALIDATE_BSD; 2473 } 2474 2475 *cs++ = cmd; 2476 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; 2477 *cs++ = 0; /* upper addr */ 2478 *cs++ = 0; /* value */ 2479 intel_ring_advance(request, cs); 2480 2481 return 0; 2482 } 2483 2484 static int gen8_emit_flush_render(struct i915_request *request, 2485 u32 mode) 2486 { 2487 struct intel_engine_cs *engine = request->engine; 2488 u32 scratch_addr = 2489 i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES; 2490 bool vf_flush_wa = false, dc_flush_wa = false; 2491 u32 *cs, flags = 0; 2492 int len; 2493 2494 flags |= PIPE_CONTROL_CS_STALL; 2495 2496 if (mode & EMIT_FLUSH) { 2497 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 2498 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 2499 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 2500 flags |= PIPE_CONTROL_FLUSH_ENABLE; 2501 } 2502 2503 if (mode & EMIT_INVALIDATE) { 2504 flags |= PIPE_CONTROL_TLB_INVALIDATE; 2505 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 2506 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 2507 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 2508 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 2509 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 2510 flags |= PIPE_CONTROL_QW_WRITE; 2511 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 2512 2513 /* 2514 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 2515 * pipe control. 2516 */ 2517 if (IS_GEN(request->i915, 9)) 2518 vf_flush_wa = true; 2519 2520 /* WaForGAMHang:kbl */ 2521 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0)) 2522 dc_flush_wa = true; 2523 } 2524 2525 len = 6; 2526 2527 if (vf_flush_wa) 2528 len += 6; 2529 2530 if (dc_flush_wa) 2531 len += 12; 2532 2533 cs = intel_ring_begin(request, len); 2534 if (IS_ERR(cs)) 2535 return PTR_ERR(cs); 2536 2537 if (vf_flush_wa) 2538 cs = gen8_emit_pipe_control(cs, 0, 0); 2539 2540 if (dc_flush_wa) 2541 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, 2542 0); 2543 2544 cs = gen8_emit_pipe_control(cs, flags, scratch_addr); 2545 2546 if (dc_flush_wa) 2547 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); 2548 2549 intel_ring_advance(request, cs); 2550 2551 return 0; 2552 } 2553 2554 /* 2555 * Reserve space for 2 NOOPs at the end of each request to be 2556 * used as a workaround for not being allowed to do lite 2557 * restore with HEAD==TAIL (WaIdleLiteRestore). 2558 */ 2559 static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) 2560 { 2561 /* Ensure there's always at least one preemption point per-request. 
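 * The MI_ARB_CHECK + MI_NOOP pair emitted here provides both that
 * preemption point and the two extra dwords reserved above for
 * WaIdleLiteRestore.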
*/
2562 *cs++ = MI_ARB_CHECK;
2563 *cs++ = MI_NOOP;
2564 request->wa_tail = intel_ring_offset(request, cs);
2565
2566 return cs;
2567 }
2568
2569 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
2570 {
2571 cs = gen8_emit_ggtt_write(cs,
2572 request->fence.seqno,
2573 request->timeline->hwsp_offset,
2574 0);
2575
2576 *cs++ = MI_USER_INTERRUPT;
2577 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2578
2579 request->tail = intel_ring_offset(request, cs);
2580 assert_ring_tail_valid(request->ring, request->tail);
2581
2582 return gen8_emit_wa_tail(request, cs);
2583 }
2584
2585 static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
2586 {
2587 /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
2588 cs = gen8_emit_ggtt_write_rcs(cs,
2589 request->fence.seqno,
2590 request->timeline->hwsp_offset,
2591 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
2592 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
2593 PIPE_CONTROL_DC_FLUSH_ENABLE);
2594 cs = gen8_emit_pipe_control(cs,
2595 PIPE_CONTROL_FLUSH_ENABLE |
2596 PIPE_CONTROL_CS_STALL,
2597 0);
2598
2599 *cs++ = MI_USER_INTERRUPT;
2600 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2601
2602 request->tail = intel_ring_offset(request, cs);
2603 assert_ring_tail_valid(request->ring, request->tail);
2604
2605 return gen8_emit_wa_tail(request, cs);
2606 }
2607
2608 static int gen8_init_rcs_context(struct i915_request *rq)
2609 {
2610 int ret;
2611
2612 ret = intel_engine_emit_ctx_wa(rq);
2613 if (ret)
2614 return ret;
2615
2616 ret = intel_rcs_context_init_mocs(rq);
2617 /*
2618 * Failing to program the MOCS is non-fatal. The system will not
2619 * run at peak performance. So generate an error and carry on.
2620 */
2621 if (ret)
2622 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
2623
2624 return i915_gem_render_state_emit(rq);
2625 }
2626
2627 static void execlists_park(struct intel_engine_cs *engine)
2628 {
2629 intel_engine_park(engine);
2630 }
2631
2632 void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
2633 {
2634 engine->submit_request = execlists_submit_request;
2635 engine->cancel_requests = execlists_cancel_requests;
2636 engine->schedule = i915_schedule;
2637 engine->execlists.tasklet.func = execlists_submission_tasklet;
2638
2639 engine->reset.prepare = execlists_reset_prepare;
2640 engine->reset.reset = execlists_reset;
2641 engine->reset.finish = execlists_reset_finish;
2642
2643 engine->park = execlists_park;
2644 engine->unpark = NULL;
2645
2646 engine->flags |= I915_ENGINE_SUPPORTS_STATS;
2647 if (!intel_vgpu_active(engine->i915))
2648 engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
2649 if (engine->preempt_context &&
2650 HAS_LOGICAL_RING_PREEMPTION(engine->i915))
2651 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
2652 }
2653
2654 static void execlists_destroy(struct intel_engine_cs *engine)
2655 {
2656 intel_engine_cleanup_common(engine);
2657 lrc_destroy_wa_ctx(engine);
2658 kfree(engine);
2659 }
2660
2661 static void
2662 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
2663 {
2664 /* Default vfuncs which can be overridden by each engine.
*/
2665
2666 engine->destroy = execlists_destroy;
2667 engine->resume = execlists_resume;
2668
2669 engine->reset.prepare = execlists_reset_prepare;
2670 engine->reset.reset = execlists_reset;
2671 engine->reset.finish = execlists_reset_finish;
2672
2673 engine->cops = &execlists_context_ops;
2674 engine->request_alloc = execlists_request_alloc;
2675
2676 engine->emit_flush = gen8_emit_flush;
2677 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
2678 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
2679
2680 engine->set_default_submission = intel_execlists_set_default_submission;
2681
2682 if (INTEL_GEN(engine->i915) < 11) {
2683 engine->irq_enable = gen8_logical_ring_enable_irq;
2684 engine->irq_disable = gen8_logical_ring_disable_irq;
2685 } else {
2686 /*
2687 * TODO: On Gen11 interrupt masks need to be clear
2688 * to allow C6 entry. Keep interrupts enabled
2689 * and take the hit of generating extra interrupts
2690 * until a more refined solution exists.
2691 */
2692 }
2693 if (IS_GEN(engine->i915, 8))
2694 engine->emit_bb_start = gen8_emit_bb_start;
2695 else
2696 engine->emit_bb_start = gen9_emit_bb_start;
2697 }
2698
2699 static inline void
2700 logical_ring_default_irqs(struct intel_engine_cs *engine)
2701 {
2702 unsigned int shift = 0;
2703
2704 if (INTEL_GEN(engine->i915) < 11) {
2705 const u8 irq_shifts[] = {
2706 [RCS0] = GEN8_RCS_IRQ_SHIFT,
2707 [BCS0] = GEN8_BCS_IRQ_SHIFT,
2708 [VCS0] = GEN8_VCS0_IRQ_SHIFT,
2709 [VCS1] = GEN8_VCS1_IRQ_SHIFT,
2710 [VECS0] = GEN8_VECS_IRQ_SHIFT,
2711 };
2712
2713 shift = irq_shifts[engine->id];
2714 }
2715
2716 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2717 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2718 }
2719
2720 int intel_execlists_submission_setup(struct intel_engine_cs *engine)
2721 {
2722 /* Intentionally left blank.
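 *
 * Execlists never use the legacy engine->buffer: every logical ring
 * context brings its own ring, so the engine-wide pointer stays NULL.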
*/ 2723 engine->buffer = NULL; 2724 2725 tasklet_init(&engine->execlists.tasklet, 2726 execlists_submission_tasklet, (unsigned long)engine); 2727 2728 logical_ring_default_vfuncs(engine); 2729 logical_ring_default_irqs(engine); 2730 2731 if (engine->class == RENDER_CLASS) { 2732 engine->init_context = gen8_init_rcs_context; 2733 engine->emit_flush = gen8_emit_flush_render; 2734 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; 2735 } 2736 2737 return 0; 2738 } 2739 2740 int intel_execlists_submission_init(struct intel_engine_cs *engine) 2741 { 2742 struct drm_i915_private *i915 = engine->i915; 2743 struct intel_engine_execlists * const execlists = &engine->execlists; 2744 u32 base = engine->mmio_base; 2745 int ret; 2746 2747 ret = intel_engine_init_common(engine); 2748 if (ret) 2749 return ret; 2750 2751 intel_engine_init_workarounds(engine); 2752 intel_engine_init_whitelist(engine); 2753 2754 if (intel_init_workaround_bb(engine)) 2755 /* 2756 * We continue even if we fail to initialize WA batch 2757 * because we only expect rare glitches but nothing 2758 * critical to prevent us from using GPU 2759 */ 2760 DRM_ERROR("WA batch buffer initialization failed\n"); 2761 2762 if (HAS_LOGICAL_RING_ELSQ(i915)) { 2763 execlists->submit_reg = i915->uncore.regs + 2764 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); 2765 execlists->ctrl_reg = i915->uncore.regs + 2766 i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); 2767 } else { 2768 execlists->submit_reg = i915->uncore.regs + 2769 i915_mmio_reg_offset(RING_ELSP(base)); 2770 } 2771 2772 execlists->preempt_complete_status = ~0u; 2773 if (engine->preempt_context) 2774 execlists->preempt_complete_status = 2775 upper_32_bits(engine->preempt_context->lrc_desc); 2776 2777 execlists->csb_status = 2778 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 2779 2780 execlists->csb_write = 2781 &engine->status_page.addr[intel_hws_csb_write_index(i915)]; 2782 2783 if (INTEL_GEN(engine->i915) < 11) 2784 execlists->csb_size = GEN8_CSB_ENTRIES; 2785 else 2786 execlists->csb_size = GEN11_CSB_ENTRIES; 2787 2788 reset_csb_pointers(execlists); 2789 2790 return 0; 2791 } 2792 2793 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) 2794 { 2795 u32 indirect_ctx_offset; 2796 2797 switch (INTEL_GEN(engine->i915)) { 2798 default: 2799 MISSING_CASE(INTEL_GEN(engine->i915)); 2800 /* fall through */ 2801 case 11: 2802 indirect_ctx_offset = 2803 GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2804 break; 2805 case 10: 2806 indirect_ctx_offset = 2807 GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2808 break; 2809 case 9: 2810 indirect_ctx_offset = 2811 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2812 break; 2813 case 8: 2814 indirect_ctx_offset = 2815 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 2816 break; 2817 } 2818 2819 return indirect_ctx_offset; 2820 } 2821 2822 static void execlists_init_reg_state(u32 *regs, 2823 struct intel_context *ce, 2824 struct intel_engine_cs *engine, 2825 struct intel_ring *ring) 2826 { 2827 struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt; 2828 bool rcs = engine->class == RENDER_CLASS; 2829 u32 base = engine->mmio_base; 2830 2831 /* 2832 * A context is actually a big batch buffer with several 2833 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. 
The
2834 * values we are setting here are only for the first context restore:
2835 * on a subsequent save, the GPU will recreate this batchbuffer with new
2836 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
2837 * we are not initializing here).
2838 *
2839 * Must keep consistent with virtual_update_register_offsets().
2840 */
2841 regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
2842 MI_LRI_FORCE_POSTED;
2843
2844 CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
2845 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
2846 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
2847 if (INTEL_GEN(engine->i915) < 11) {
2848 regs[CTX_CONTEXT_CONTROL + 1] |=
2849 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
2850 CTX_CTRL_RS_CTX_ENABLE);
2851 }
2852 CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
2853 CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
2854 CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
2855 CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
2856 RING_CTL_SIZE(ring->size) | RING_VALID);
2857 CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
2858 CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
2859 CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
2860 CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
2861 CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
2862 CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
2863 if (rcs) {
2864 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2865
2866 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
2867 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
2868 RING_INDIRECT_CTX_OFFSET(base), 0);
2869 if (wa_ctx->indirect_ctx.size) {
2870 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2871
2872 regs[CTX_RCS_INDIRECT_CTX + 1] =
2873 (ggtt_offset + wa_ctx->indirect_ctx.offset) |
2874 (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
2875
2876 regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
2877 intel_lr_indirect_ctx_offset(engine) << 6;
2878 }
2879
2880 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
2881 if (wa_ctx->per_ctx.size) {
2882 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2883
2884 regs[CTX_BB_PER_CTX_PTR + 1] =
2885 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
2886 }
2887 }
2888
2889 regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2890
2891 CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
2892 /* PDP values will be assigned later if needed */
2893 CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
2894 CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
2895 CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
2896 CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
2897 CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
2898 CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
2899 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
2900 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
2901
2902 if (i915_vm_is_4lvl(&ppgtt->vm)) {
2903 /* 64b PPGTT (48bit canonical)
2904 * PDP0_DESCRIPTOR contains the base address to PML4 and
2905 * other PDP Descriptors are ignored.
2906 */ 2907 ASSIGN_CTX_PML4(ppgtt, regs); 2908 } else { 2909 ASSIGN_CTX_PDP(ppgtt, regs, 3); 2910 ASSIGN_CTX_PDP(ppgtt, regs, 2); 2911 ASSIGN_CTX_PDP(ppgtt, regs, 1); 2912 ASSIGN_CTX_PDP(ppgtt, regs, 0); 2913 } 2914 2915 if (rcs) { 2916 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2917 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); 2918 2919 i915_oa_init_reg_state(engine, ce, regs); 2920 } 2921 2922 regs[CTX_END] = MI_BATCH_BUFFER_END; 2923 if (INTEL_GEN(engine->i915) >= 10) 2924 regs[CTX_END] |= BIT(0); 2925 } 2926 2927 static int 2928 populate_lr_context(struct intel_context *ce, 2929 struct drm_i915_gem_object *ctx_obj, 2930 struct intel_engine_cs *engine, 2931 struct intel_ring *ring) 2932 { 2933 void *vaddr; 2934 u32 *regs; 2935 int ret; 2936 2937 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); 2938 if (IS_ERR(vaddr)) { 2939 ret = PTR_ERR(vaddr); 2940 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); 2941 return ret; 2942 } 2943 2944 if (engine->default_state) { 2945 /* 2946 * We only want to copy over the template context state; 2947 * skipping over the headers reserved for GuC communication, 2948 * leaving those as zero. 2949 */ 2950 const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE; 2951 void *defaults; 2952 2953 defaults = i915_gem_object_pin_map(engine->default_state, 2954 I915_MAP_WB); 2955 if (IS_ERR(defaults)) { 2956 ret = PTR_ERR(defaults); 2957 goto err_unpin_ctx; 2958 } 2959 2960 memcpy(vaddr + start, defaults + start, engine->context_size); 2961 i915_gem_object_unpin_map(engine->default_state); 2962 } 2963 2964 /* The second page of the context object contains some fields which must 2965 * be set up prior to the first execution. */ 2966 regs = vaddr + LRC_STATE_PN * PAGE_SIZE; 2967 execlists_init_reg_state(regs, ce, engine, ring); 2968 if (!engine->default_state) 2969 regs[CTX_CONTEXT_CONTROL + 1] |= 2970 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 2971 if (ce->gem_context == engine->i915->preempt_context && 2972 INTEL_GEN(engine->i915) < 11) 2973 regs[CTX_CONTEXT_CONTROL + 1] |= 2974 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2975 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); 2976 2977 ret = 0; 2978 err_unpin_ctx: 2979 __i915_gem_object_flush_map(ctx_obj, 2980 LRC_HEADER_PAGES * PAGE_SIZE, 2981 engine->context_size); 2982 i915_gem_object_unpin_map(ctx_obj); 2983 return ret; 2984 } 2985 2986 static struct i915_timeline *get_timeline(struct i915_gem_context *ctx) 2987 { 2988 if (ctx->timeline) 2989 return i915_timeline_get(ctx->timeline); 2990 else 2991 return i915_timeline_create(ctx->i915, NULL); 2992 } 2993 2994 static int execlists_context_deferred_alloc(struct intel_context *ce, 2995 struct intel_engine_cs *engine) 2996 { 2997 struct drm_i915_gem_object *ctx_obj; 2998 struct i915_vma *vma; 2999 u32 context_size; 3000 struct intel_ring *ring; 3001 struct i915_timeline *timeline; 3002 int ret; 3003 3004 if (ce->state) 3005 return 0; 3006 3007 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); 3008 3009 /* 3010 * Before the actual start of the context image, we insert a few pages 3011 * for our own use and for sharing with the GuC. 
3012 */ 3013 context_size += LRC_HEADER_PAGES * PAGE_SIZE; 3014 3015 ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); 3016 if (IS_ERR(ctx_obj)) 3017 return PTR_ERR(ctx_obj); 3018 3019 vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL); 3020 if (IS_ERR(vma)) { 3021 ret = PTR_ERR(vma); 3022 goto error_deref_obj; 3023 } 3024 3025 timeline = get_timeline(ce->gem_context); 3026 if (IS_ERR(timeline)) { 3027 ret = PTR_ERR(timeline); 3028 goto error_deref_obj; 3029 } 3030 3031 ring = intel_engine_create_ring(engine, 3032 timeline, 3033 ce->gem_context->ring_size); 3034 i915_timeline_put(timeline); 3035 if (IS_ERR(ring)) { 3036 ret = PTR_ERR(ring); 3037 goto error_deref_obj; 3038 } 3039 3040 ret = populate_lr_context(ce, ctx_obj, engine, ring); 3041 if (ret) { 3042 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 3043 goto error_ring_free; 3044 } 3045 3046 ce->ring = ring; 3047 ce->state = vma; 3048 3049 return 0; 3050 3051 error_ring_free: 3052 intel_ring_put(ring); 3053 error_deref_obj: 3054 i915_gem_object_put(ctx_obj); 3055 return ret; 3056 } 3057 3058 static void virtual_context_destroy(struct kref *kref) 3059 { 3060 struct virtual_engine *ve = 3061 container_of(kref, typeof(*ve), context.ref); 3062 unsigned int n; 3063 3064 GEM_BUG_ON(ve->request); 3065 GEM_BUG_ON(ve->context.active); 3066 3067 for (n = 0; n < ve->num_siblings; n++) { 3068 struct intel_engine_cs *sibling = ve->siblings[n]; 3069 struct rb_node *node = &ve->nodes[sibling->id].rb; 3070 3071 if (RB_EMPTY_NODE(node)) 3072 continue; 3073 3074 spin_lock_irq(&sibling->timeline.lock); 3075 3076 /* Detachment is lazily performed in the execlists tasklet */ 3077 if (!RB_EMPTY_NODE(node)) 3078 rb_erase_cached(node, &sibling->execlists.virtual); 3079 3080 spin_unlock_irq(&sibling->timeline.lock); 3081 } 3082 GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); 3083 3084 if (ve->context.state) 3085 __execlists_context_fini(&ve->context); 3086 3087 kfree(ve->bonds); 3088 3089 i915_timeline_fini(&ve->base.timeline); 3090 kfree(ve); 3091 } 3092 3093 static void virtual_engine_initial_hint(struct virtual_engine *ve) 3094 { 3095 int swp; 3096 3097 /* 3098 * Pick a random sibling on starting to help spread the load around. 3099 * 3100 * New contexts are typically created with exactly the same order 3101 * of siblings, and often started in batches. Due to the way we iterate 3102 * the array of sibling when submitting requests, sibling[0] is 3103 * prioritised for dequeuing. If we make sure that sibling[0] is fairly 3104 * randomised across the system, we also help spread the load by the 3105 * first engine we inspect being different each time. 3106 * 3107 * NB This does not force us to execute on this engine, it will just 3108 * typically be the first we inspect for submission. 
3109 */ 3110 swp = prandom_u32_max(ve->num_siblings); 3111 if (!swp) 3112 return; 3113 3114 swap(ve->siblings[swp], ve->siblings[0]); 3115 virtual_update_register_offsets(ve->context.lrc_reg_state, 3116 ve->siblings[0]); 3117 } 3118 3119 static int virtual_context_pin(struct intel_context *ce) 3120 { 3121 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 3122 int err; 3123 3124 /* Note: we must use a real engine class for setting up reg state */ 3125 err = __execlists_context_pin(ce, ve->siblings[0]); 3126 if (err) 3127 return err; 3128 3129 virtual_engine_initial_hint(ve); 3130 return 0; 3131 } 3132 3133 static void virtual_context_enter(struct intel_context *ce) 3134 { 3135 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 3136 unsigned int n; 3137 3138 for (n = 0; n < ve->num_siblings; n++) 3139 intel_engine_pm_get(ve->siblings[n]); 3140 } 3141 3142 static void virtual_context_exit(struct intel_context *ce) 3143 { 3144 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 3145 unsigned int n; 3146 3147 ce->saturated = 0; 3148 for (n = 0; n < ve->num_siblings; n++) 3149 intel_engine_pm_put(ve->siblings[n]); 3150 } 3151 3152 static const struct intel_context_ops virtual_context_ops = { 3153 .pin = virtual_context_pin, 3154 .unpin = execlists_context_unpin, 3155 3156 .enter = virtual_context_enter, 3157 .exit = virtual_context_exit, 3158 3159 .destroy = virtual_context_destroy, 3160 }; 3161 3162 static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) 3163 { 3164 struct i915_request *rq; 3165 intel_engine_mask_t mask; 3166 3167 rq = READ_ONCE(ve->request); 3168 if (!rq) 3169 return 0; 3170 3171 /* The rq is ready for submission; rq->execution_mask is now stable. */ 3172 mask = rq->execution_mask; 3173 if (unlikely(!mask)) { 3174 /* Invalid selection, submit to a random engine in error */ 3175 i915_request_skip(rq, -ENODEV); 3176 mask = ve->siblings[0]->mask; 3177 } 3178 3179 GEM_TRACE("%s: rq=%llx:%lld, mask=%x, prio=%d\n", 3180 ve->base.name, 3181 rq->fence.context, rq->fence.seqno, 3182 mask, ve->base.execlists.queue_priority_hint); 3183 3184 return mask; 3185 } 3186 3187 static void virtual_submission_tasklet(unsigned long data) 3188 { 3189 struct virtual_engine * const ve = (struct virtual_engine *)data; 3190 const int prio = ve->base.execlists.queue_priority_hint; 3191 intel_engine_mask_t mask; 3192 unsigned int n; 3193 3194 rcu_read_lock(); 3195 mask = virtual_submission_mask(ve); 3196 rcu_read_unlock(); 3197 if (unlikely(!mask)) 3198 return; 3199 3200 local_irq_disable(); 3201 for (n = 0; READ_ONCE(ve->request) && n < ve->num_siblings; n++) { 3202 struct intel_engine_cs *sibling = ve->siblings[n]; 3203 struct ve_node * const node = &ve->nodes[sibling->id]; 3204 struct rb_node **parent, *rb; 3205 bool first; 3206 3207 if (unlikely(!(mask & sibling->mask))) { 3208 if (!RB_EMPTY_NODE(&node->rb)) { 3209 spin_lock(&sibling->timeline.lock); 3210 rb_erase_cached(&node->rb, 3211 &sibling->execlists.virtual); 3212 RB_CLEAR_NODE(&node->rb); 3213 spin_unlock(&sibling->timeline.lock); 3214 } 3215 continue; 3216 } 3217 3218 spin_lock(&sibling->timeline.lock); 3219 3220 if (!RB_EMPTY_NODE(&node->rb)) { 3221 /* 3222 * Cheat and avoid rebalancing the tree if we can 3223 * reuse this node in situ. 
3224 */ 3225 first = rb_first_cached(&sibling->execlists.virtual) == 3226 &node->rb; 3227 if (prio == node->prio || (prio > node->prio && first)) 3228 goto submit_engine; 3229 3230 rb_erase_cached(&node->rb, &sibling->execlists.virtual); 3231 } 3232 3233 rb = NULL; 3234 first = true; 3235 parent = &sibling->execlists.virtual.rb_root.rb_node; 3236 while (*parent) { 3237 struct ve_node *other; 3238 3239 rb = *parent; 3240 other = rb_entry(rb, typeof(*other), rb); 3241 if (prio > other->prio) { 3242 parent = &rb->rb_left; 3243 } else { 3244 parent = &rb->rb_right; 3245 first = false; 3246 } 3247 } 3248 3249 rb_link_node(&node->rb, rb, parent); 3250 rb_insert_color_cached(&node->rb, 3251 &sibling->execlists.virtual, 3252 first); 3253 3254 submit_engine: 3255 GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); 3256 node->prio = prio; 3257 if (first && prio > sibling->execlists.queue_priority_hint) { 3258 sibling->execlists.queue_priority_hint = prio; 3259 tasklet_hi_schedule(&sibling->execlists.tasklet); 3260 } 3261 3262 spin_unlock(&sibling->timeline.lock); 3263 } 3264 local_irq_enable(); 3265 } 3266 3267 static void virtual_submit_request(struct i915_request *rq) 3268 { 3269 struct virtual_engine *ve = to_virtual_engine(rq->engine); 3270 3271 GEM_TRACE("%s: rq=%llx:%lld\n", 3272 ve->base.name, 3273 rq->fence.context, 3274 rq->fence.seqno); 3275 3276 GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); 3277 3278 GEM_BUG_ON(ve->request); 3279 ve->base.execlists.queue_priority_hint = rq_prio(rq); 3280 WRITE_ONCE(ve->request, rq); 3281 3282 tasklet_schedule(&ve->base.execlists.tasklet); 3283 } 3284 3285 static struct ve_bond * 3286 virtual_find_bond(struct virtual_engine *ve, 3287 const struct intel_engine_cs *master) 3288 { 3289 int i; 3290 3291 for (i = 0; i < ve->num_bonds; i++) { 3292 if (ve->bonds[i].master == master) 3293 return &ve->bonds[i]; 3294 } 3295 3296 return NULL; 3297 } 3298 3299 static void 3300 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) 3301 { 3302 struct virtual_engine *ve = to_virtual_engine(rq->engine); 3303 struct ve_bond *bond; 3304 3305 bond = virtual_find_bond(ve, to_request(signal)->engine); 3306 if (bond) { 3307 intel_engine_mask_t old, new, cmp; 3308 3309 cmp = READ_ONCE(rq->execution_mask); 3310 do { 3311 old = cmp; 3312 new = cmp & bond->sibling_mask; 3313 } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old); 3314 } 3315 } 3316 3317 struct intel_context * 3318 intel_execlists_create_virtual(struct i915_gem_context *ctx, 3319 struct intel_engine_cs **siblings, 3320 unsigned int count) 3321 { 3322 struct virtual_engine *ve; 3323 unsigned int n; 3324 int err; 3325 3326 if (count == 0) 3327 return ERR_PTR(-EINVAL); 3328 3329 if (count == 1) 3330 return intel_context_create(ctx, siblings[0]); 3331 3332 ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); 3333 if (!ve) 3334 return ERR_PTR(-ENOMEM); 3335 3336 ve->base.i915 = ctx->i915; 3337 ve->base.id = -1; 3338 ve->base.class = OTHER_CLASS; 3339 ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; 3340 ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; 3341 ve->base.flags = I915_ENGINE_IS_VIRTUAL; 3342 3343 snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); 3344 3345 err = i915_timeline_init(ctx->i915, &ve->base.timeline, NULL); 3346 if (err) 3347 goto err_put; 3348 i915_timeline_set_subclass(&ve->base.timeline, TIMELINE_VIRTUAL); 3349 3350 intel_engine_init_execlists(&ve->base); 3351 3352 ve->base.cops = &virtual_context_ops; 3353 ve->base.request_alloc = 
execlists_request_alloc; 3354 3355 ve->base.schedule = i915_schedule; 3356 ve->base.submit_request = virtual_submit_request; 3357 ve->base.bond_execute = virtual_bond_execute; 3358 3359 ve->base.execlists.queue_priority_hint = INT_MIN; 3360 tasklet_init(&ve->base.execlists.tasklet, 3361 virtual_submission_tasklet, 3362 (unsigned long)ve); 3363 3364 intel_context_init(&ve->context, ctx, &ve->base); 3365 3366 for (n = 0; n < count; n++) { 3367 struct intel_engine_cs *sibling = siblings[n]; 3368 3369 GEM_BUG_ON(!is_power_of_2(sibling->mask)); 3370 if (sibling->mask & ve->base.mask) { 3371 DRM_DEBUG("duplicate %s entry in load balancer\n", 3372 sibling->name); 3373 err = -EINVAL; 3374 goto err_put; 3375 } 3376 3377 /* 3378 * The virtual engine implementation is tightly coupled to 3379 * the execlists backend -- we push out request directly 3380 * into a tree inside each physical engine. We could support 3381 * layering if we handle cloning of the requests and 3382 * submitting a copy into each backend. 3383 */ 3384 if (sibling->execlists.tasklet.func != 3385 execlists_submission_tasklet) { 3386 err = -ENODEV; 3387 goto err_put; 3388 } 3389 3390 GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); 3391 RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); 3392 3393 ve->siblings[ve->num_siblings++] = sibling; 3394 ve->base.mask |= sibling->mask; 3395 3396 /* 3397 * All physical engines must be compatible for their emission 3398 * functions (as we build the instructions during request 3399 * construction and do not alter them before submission 3400 * on the physical engine). We use the engine class as a guide 3401 * here, although that could be refined. 3402 */ 3403 if (ve->base.class != OTHER_CLASS) { 3404 if (ve->base.class != sibling->class) { 3405 DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", 3406 sibling->class, ve->base.class); 3407 err = -EINVAL; 3408 goto err_put; 3409 } 3410 continue; 3411 } 3412 3413 ve->base.class = sibling->class; 3414 ve->base.uabi_class = sibling->uabi_class; 3415 snprintf(ve->base.name, sizeof(ve->base.name), 3416 "v%dx%d", ve->base.class, count); 3417 ve->base.context_size = sibling->context_size; 3418 3419 ve->base.emit_bb_start = sibling->emit_bb_start; 3420 ve->base.emit_flush = sibling->emit_flush; 3421 ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb; 3422 ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; 3423 ve->base.emit_fini_breadcrumb_dw = 3424 sibling->emit_fini_breadcrumb_dw; 3425 } 3426 3427 return &ve->context; 3428 3429 err_put: 3430 intel_context_put(&ve->context); 3431 return ERR_PTR(err); 3432 } 3433 3434 struct intel_context * 3435 intel_execlists_clone_virtual(struct i915_gem_context *ctx, 3436 struct intel_engine_cs *src) 3437 { 3438 struct virtual_engine *se = to_virtual_engine(src); 3439 struct intel_context *dst; 3440 3441 dst = intel_execlists_create_virtual(ctx, 3442 se->siblings, 3443 se->num_siblings); 3444 if (IS_ERR(dst)) 3445 return dst; 3446 3447 if (se->num_bonds) { 3448 struct virtual_engine *de = to_virtual_engine(dst->engine); 3449 3450 de->bonds = kmemdup(se->bonds, 3451 sizeof(*se->bonds) * se->num_bonds, 3452 GFP_KERNEL); 3453 if (!de->bonds) { 3454 intel_context_put(dst); 3455 return ERR_PTR(-ENOMEM); 3456 } 3457 3458 de->num_bonds = se->num_bonds; 3459 } 3460 3461 return dst; 3462 } 3463 3464 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, 3465 const struct intel_engine_cs *master, 3466 const struct intel_engine_cs *sibling) 3467 { 3468 struct virtual_engine *ve 
= to_virtual_engine(engine); 3469 struct ve_bond *bond; 3470 int n; 3471 3472 /* Sanity check the sibling is part of the virtual engine */ 3473 for (n = 0; n < ve->num_siblings; n++) 3474 if (sibling == ve->siblings[n]) 3475 break; 3476 if (n == ve->num_siblings) 3477 return -EINVAL; 3478 3479 bond = virtual_find_bond(ve, master); 3480 if (bond) { 3481 bond->sibling_mask |= sibling->mask; 3482 return 0; 3483 } 3484 3485 bond = krealloc(ve->bonds, 3486 sizeof(*bond) * (ve->num_bonds + 1), 3487 GFP_KERNEL); 3488 if (!bond) 3489 return -ENOMEM; 3490 3491 bond[ve->num_bonds].master = master; 3492 bond[ve->num_bonds].sibling_mask = sibling->mask; 3493 3494 ve->bonds = bond; 3495 ve->num_bonds++; 3496 3497 return 0; 3498 } 3499 3500 void intel_execlists_show_requests(struct intel_engine_cs *engine, 3501 struct drm_printer *m, 3502 void (*show_request)(struct drm_printer *m, 3503 struct i915_request *rq, 3504 const char *prefix), 3505 unsigned int max) 3506 { 3507 const struct intel_engine_execlists *execlists = &engine->execlists; 3508 struct i915_request *rq, *last; 3509 unsigned long flags; 3510 unsigned int count; 3511 struct rb_node *rb; 3512 3513 spin_lock_irqsave(&engine->timeline.lock, flags); 3514 3515 last = NULL; 3516 count = 0; 3517 list_for_each_entry(rq, &engine->timeline.requests, link) { 3518 if (count++ < max - 1) 3519 show_request(m, rq, "\t\tE "); 3520 else 3521 last = rq; 3522 } 3523 if (last) { 3524 if (count > max) { 3525 drm_printf(m, 3526 "\t\t...skipping %d executing requests...\n", 3527 count - max); 3528 } 3529 show_request(m, last, "\t\tE "); 3530 } 3531 3532 last = NULL; 3533 count = 0; 3534 if (execlists->queue_priority_hint != INT_MIN) 3535 drm_printf(m, "\t\tQueue priority hint: %d\n", 3536 execlists->queue_priority_hint); 3537 for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { 3538 struct i915_priolist *p = rb_entry(rb, typeof(*p), node); 3539 int i; 3540 3541 priolist_for_each_request(rq, p, i) { 3542 if (count++ < max - 1) 3543 show_request(m, rq, "\t\tQ "); 3544 else 3545 last = rq; 3546 } 3547 } 3548 if (last) { 3549 if (count > max) { 3550 drm_printf(m, 3551 "\t\t...skipping %d queued requests...\n", 3552 count - max); 3553 } 3554 show_request(m, last, "\t\tQ "); 3555 } 3556 3557 last = NULL; 3558 count = 0; 3559 for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) { 3560 struct virtual_engine *ve = 3561 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 3562 struct i915_request *rq = READ_ONCE(ve->request); 3563 3564 if (rq) { 3565 if (count++ < max - 1) 3566 show_request(m, rq, "\t\tV "); 3567 else 3568 last = rq; 3569 } 3570 } 3571 if (last) { 3572 if (count > max) { 3573 drm_printf(m, 3574 "\t\t...skipping %d virtual requests...\n", 3575 count - max); 3576 } 3577 show_request(m, last, "\t\tV "); 3578 } 3579 3580 spin_unlock_irqrestore(&engine->timeline.lock, flags); 3581 } 3582 3583 void intel_lr_context_reset(struct intel_engine_cs *engine, 3584 struct intel_context *ce, 3585 u32 head, 3586 bool scrub) 3587 { 3588 /* 3589 * We want a simple context + ring to execute the breadcrumb update. 3590 * We cannot rely on the context being intact across the GPU hang, 3591 * so clear it and rebuild just what we need for the breadcrumb. 3592 * All pending requests for this context will be zapped, and any 3593 * future request will be after userspace has had the opportunity 3594 * to recreate its own state. 
3595 */ 3596 if (scrub) { 3597 u32 *regs = ce->lrc_reg_state; 3598 3599 if (engine->pinned_default_state) { 3600 memcpy(regs, /* skip restoring the vanilla PPHWSP */ 3601 engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, 3602 engine->context_size - PAGE_SIZE); 3603 } 3604 execlists_init_reg_state(regs, ce, engine, ce->ring); 3605 } 3606 3607 /* Rerun the request; its payload has been neutered (if guilty). */ 3608 ce->ring->head = head; 3609 intel_ring_update_space(ce->ring); 3610 3611 __execlists_update_reg_state(ce, engine); 3612 } 3613 3614 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 3615 #include "selftest_lrc.c" 3616 #endif 3617