1 /* 2 * Copyright © 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Ben Widawsky <ben@bwidawsk.net> 25 * Michel Thierry <michel.thierry@intel.com> 26 * Thomas Daniel <thomas.daniel@intel.com> 27 * Oscar Mateo <oscar.mateo@intel.com> 28 * 29 */ 30 31 /** 32 * DOC: Logical Rings, Logical Ring Contexts and Execlists 33 * 34 * Motivation: 35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". 36 * These expanded contexts enable a number of new abilities, especially 37 * "Execlists" (also implemented in this file). 38 * 39 * One of the main differences with the legacy HW contexts is that logical 40 * ring contexts incorporate many more things into the context's state, like 41 * PDPs or ringbuffer control registers: 42 * 43 * The reason why PDPs are included in the context is straightforward: as 44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs 45 * contained there means you don't need to do a ppgtt->switch_mm yourself; 46 * instead, the GPU will do it for you on the context switch. 47 * 48 * But, what about the ringbuffer control registers (head, tail, etc.)? 49 * Shouldn't we just need a set of those per-engine command streamer? This is 50 * where the name "Logical Rings" starts to make sense: by virtualizing the 51 * rings, the engine cs shifts to a new "ring buffer" with every context 52 * switch. When you want to submit a workload to the GPU you: A) choose your 53 * context, B) find its appropriate virtualized ring, C) write commands to it 54 * and then, finally, D) tell the GPU to switch to that context. 55 * 56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch 57 * to a context is via a context execution list, ergo "Execlists". 58 * 59 * LRC implementation: 60 * Regarding the creation of contexts, we have: 61 * 62 * - One global default context. 63 * - One local default context for each opened fd. 64 * - One local extra context for each context create ioctl call. 65 * 66 * Now that ringbuffers belong per-context (and not per-engine, like before) 67 * and that contexts are uniquely tied to a given engine (and not reusable, 68 * like before) we need: 69 * 70 * - One ringbuffer per-engine inside each context. 71 * - One backing object per-engine inside each context. 72 * 73 * The global default context starts its life with these new objects fully 74 * allocated and populated.
The local default context for each opened fd is 75 * more complex, because we don't know at creation time which engine is going 76 * to use it. To handle this, we have implemented a deferred creation of LR 77 * contexts: 78 * 79 * The local context starts its life as a hollow or blank holder that only 80 * gets populated for a given engine once we receive an execbuffer. If later 81 * on we receive another execbuffer ioctl for the same context but a different 82 * engine, we allocate/populate a new ringbuffer and context backing object and 83 * so on. 84 * 85 * Finally, regarding local contexts created using the ioctl call: as they are 86 * only allowed with the render ring, we can allocate & populate them right 87 * away (no need to defer anything, at least for now). 88 * 89 * Execlists implementation: 90 * Execlists are the new method by which, on gen8+ hardware, workloads are 91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method). 92 * This method works as follows: 93 * 94 * When a request is committed, its commands (the BB start and any leading or 95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer 96 * for the appropriate context. The tail pointer in the hardware context is not 97 * updated at this time, but instead, kept by the driver in the ringbuffer 98 * structure. A structure representing this request is added to a request queue 99 * for the appropriate engine: this structure contains a copy of the context's 100 * tail after the request was written to the ring buffer and a pointer to the 101 * context itself. 102 * 103 * If the engine's request queue was empty before the request was added, the 104 * queue is processed immediately. Otherwise the queue will be processed during 105 * a context switch interrupt. In any case, elements on the queue will get sent 106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a 107 * globally unique 20-bit submission ID. 108 * 109 * When execution of a request completes, the GPU updates the context status 110 * buffer with a context complete event and generates a context switch interrupt. 111 * During the interrupt handling, the driver examines the events in the buffer: 112 * for each context complete event, if the announced ID matches that on the head 113 * of the request queue, then that request is retired and removed from the queue. 114 * 115 * After processing, if any requests were retired and the queue is not empty 116 * then a new execution list can be submitted. The two requests at the front of 117 * the queue are next to be submitted but since a context may not occur twice in 118 * an execution list, if subsequent requests have the same ID as the first then 119 * the two requests must be combined. This is done simply by discarding requests 120 * at the head of the queue until either only one request is left (in which case 121 * we use a NULL second context) or the first two requests have unique IDs. 122 * 123 * By always executing the first two requests in the queue the driver ensures 124 * that the GPU is kept as busy as possible. In the case where a single context 125 * completes but a second context is still executing, the request for this second 126 * context will be at the head of the queue when we remove the first one.
This 127 * request will then be resubmitted along with a new request for a different context, 128 * which will cause the hardware to continue executing the second request and queue 129 * the new request (the GPU detects the condition of a context getting preempted 130 * with the same context and optimizes the context switch flow by not doing 131 * preemption, but just sampling the new tail pointer). 132 * 133 */ 134 #include <linux/interrupt.h> 135 136 #include "i915_drv.h" 137 #include "i915_perf.h" 138 #include "i915_trace.h" 139 #include "i915_vgpu.h" 140 #include "intel_breadcrumbs.h" 141 #include "intel_context.h" 142 #include "intel_engine_pm.h" 143 #include "intel_gt.h" 144 #include "intel_gt_pm.h" 145 #include "intel_gt_requests.h" 146 #include "intel_lrc_reg.h" 147 #include "intel_mocs.h" 148 #include "intel_reset.h" 149 #include "intel_ring.h" 150 #include "intel_workarounds.h" 151 #include "shmem_utils.h" 152 153 #define RING_EXECLIST_QFULL (1 << 0x2) 154 #define RING_EXECLIST1_VALID (1 << 0x3) 155 #define RING_EXECLIST0_VALID (1 << 0x4) 156 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) 157 #define RING_EXECLIST1_ACTIVE (1 << 0x11) 158 #define RING_EXECLIST0_ACTIVE (1 << 0x12) 159 160 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) 161 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1) 162 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) 163 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) 164 #define GEN8_CTX_STATUS_COMPLETE (1 << 4) 165 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) 166 167 #define GEN8_CTX_STATUS_COMPLETED_MASK \ 168 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) 169 170 #define CTX_DESC_FORCE_RESTORE BIT_ULL(2) 171 172 #define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */ 173 #define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */ 174 #define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15) 175 #define GEN12_IDLE_CTX_ID 0x7FF 176 #define GEN12_CSB_CTX_VALID(csb_dw) \ 177 (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID) 178 179 /* Typical size of the average request (2 pipecontrols and a MI_BB) */ 180 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 181 182 struct virtual_engine { 183 struct intel_engine_cs base; 184 struct intel_context context; 185 186 /* 187 * We allow only a single request through the virtual engine at a time 188 * (each request in the timeline waits for the completion fence of 189 * the previous before being submitted). By restricting ourselves to 190 * only submitting a single request, each request is placed on to a 191 * physical engine to maximise load spreading (by virtue of the late greedy 192 * scheduling -- each real engine takes the next available request 193 * upon idling). 194 */ 195 struct i915_request *request; 196 197 /* 198 * We keep an rbtree of available virtual engines inside each physical 199 * engine, sorted by priority. Here we preallocate the nodes we need 200 * for the virtual engine, indexed by physical_engine->id. 201 */ 202 struct ve_node { 203 struct rb_node rb; 204 int prio; 205 } nodes[I915_NUM_ENGINES]; 206 207 /* 208 * Keep track of bonded pairs -- restrictions upon our selection 209 * of physical engines any particular request may be submitted to. 210 * If we receive a submit-fence from a master engine, we will only 211 * use one of the sibling_mask physical engines.
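 * For instance, a bond whose master is one physical engine and whose
 * sibling_mask names exactly one other engine pins any request that is
 * submit-fenced on the master to that single sibling.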
212 */ 213 struct ve_bond { 214 const struct intel_engine_cs *master; 215 intel_engine_mask_t sibling_mask; 216 } *bonds; 217 unsigned int num_bonds; 218 219 /* And finally, which physical engines this virtual engine maps onto. */ 220 unsigned int num_siblings; 221 struct intel_engine_cs *siblings[]; 222 }; 223 224 static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) 225 { 226 GEM_BUG_ON(!intel_engine_is_virtual(engine)); 227 return container_of(engine, struct virtual_engine, base); 228 } 229 230 static int __execlists_context_alloc(struct intel_context *ce, 231 struct intel_engine_cs *engine); 232 233 static void execlists_init_reg_state(u32 *reg_state, 234 const struct intel_context *ce, 235 const struct intel_engine_cs *engine, 236 const struct intel_ring *ring, 237 bool close); 238 static void 239 __execlists_update_reg_state(const struct intel_context *ce, 240 const struct intel_engine_cs *engine, 241 u32 head); 242 243 static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) 244 { 245 if (INTEL_GEN(engine->i915) >= 12) 246 return 0x60; 247 else if (INTEL_GEN(engine->i915) >= 9) 248 return 0x54; 249 else if (engine->class == RENDER_CLASS) 250 return 0x58; 251 else 252 return -1; 253 } 254 255 static int lrc_ring_gpr0(const struct intel_engine_cs *engine) 256 { 257 if (INTEL_GEN(engine->i915) >= 12) 258 return 0x74; 259 else if (INTEL_GEN(engine->i915) >= 9) 260 return 0x68; 261 else if (engine->class == RENDER_CLASS) 262 return 0xd8; 263 else 264 return -1; 265 } 266 267 static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine) 268 { 269 if (INTEL_GEN(engine->i915) >= 12) 270 return 0x12; 271 else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS) 272 return 0x18; 273 else 274 return -1; 275 } 276 277 static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine) 278 { 279 int x; 280 281 x = lrc_ring_wa_bb_per_ctx(engine); 282 if (x < 0) 283 return x; 284 285 return x + 2; 286 } 287 288 static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) 289 { 290 int x; 291 292 x = lrc_ring_indirect_ptr(engine); 293 if (x < 0) 294 return x; 295 296 return x + 2; 297 } 298 299 static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) 300 { 301 if (engine->class != RENDER_CLASS) 302 return -1; 303 304 if (INTEL_GEN(engine->i915) >= 12) 305 return 0xb6; 306 else if (INTEL_GEN(engine->i915) >= 11) 307 return 0xaa; 308 else 309 return -1; 310 } 311 312 static u32 313 lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) 314 { 315 switch (INTEL_GEN(engine->i915)) { 316 default: 317 MISSING_CASE(INTEL_GEN(engine->i915)); 318 fallthrough; 319 case 12: 320 return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 321 case 11: 322 return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 323 case 10: 324 return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 325 case 9: 326 return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 327 case 8: 328 return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; 329 } 330 } 331 332 static void 333 lrc_ring_setup_indirect_ctx(u32 *regs, 334 const struct intel_engine_cs *engine, 335 u32 ctx_bb_ggtt_addr, 336 u32 size) 337 { 338 GEM_BUG_ON(!size); 339 GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES)); 340 GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1); 341 regs[lrc_ring_indirect_ptr(engine) + 1] = 342 ctx_bb_ggtt_addr | (size / CACHELINE_BYTES); 343 344 GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1); 345 regs[lrc_ring_indirect_offset(engine) + 1] = 346 
lrc_ring_indirect_offset_default(engine) << 6; 347 } 348 349 static u32 intel_context_get_runtime(const struct intel_context *ce) 350 { 351 /* 352 * We can use either ppHWSP[16] which is recorded before the context 353 * switch (and so excludes the cost of context switches) or use the 354 * value from the context image itself, which is saved/restored earlier 355 * and so includes the cost of the save. 356 */ 357 return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); 358 } 359 360 static void mark_eio(struct i915_request *rq) 361 { 362 if (i915_request_completed(rq)) 363 return; 364 365 GEM_BUG_ON(i915_request_signaled(rq)); 366 367 i915_request_set_error_once(rq, -EIO); 368 i915_request_mark_complete(rq); 369 } 370 371 static struct i915_request * 372 active_request(const struct intel_timeline * const tl, struct i915_request *rq) 373 { 374 struct i915_request *active = rq; 375 376 rcu_read_lock(); 377 list_for_each_entry_continue_reverse(rq, &tl->requests, link) { 378 if (i915_request_completed(rq)) 379 break; 380 381 active = rq; 382 } 383 rcu_read_unlock(); 384 385 return active; 386 } 387 388 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) 389 { 390 return (i915_ggtt_offset(engine->status_page.vma) + 391 I915_GEM_HWS_PREEMPT_ADDR); 392 } 393 394 static inline void 395 ring_set_paused(const struct intel_engine_cs *engine, int state) 396 { 397 /* 398 * We inspect HWS_PREEMPT with a semaphore inside 399 * engine->emit_fini_breadcrumb. If the dword is true, 400 * the ring is paused as the semaphore will busywait 401 * until the dword is false. 402 */ 403 engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; 404 if (state) 405 wmb(); 406 } 407 408 static inline struct i915_priolist *to_priolist(struct rb_node *rb) 409 { 410 return rb_entry(rb, struct i915_priolist, node); 411 } 412 413 static inline int rq_prio(const struct i915_request *rq) 414 { 415 return READ_ONCE(rq->sched.attr.priority); 416 } 417 418 static int effective_prio(const struct i915_request *rq) 419 { 420 int prio = rq_prio(rq); 421 422 /* 423 * If this request is special and must not be interrupted at any 424 * cost, so be it. Note we are only checking the most recent request 425 * in the context and so may be masking an earlier vip request. It 426 * is hoped that under the conditions where nopreempt is used, this 427 * will not matter (i.e. all requests to that context will be 428 * nopreempt for as long as desired). 429 */ 430 if (i915_request_has_nopreempt(rq)) 431 prio = I915_PRIORITY_UNPREEMPTABLE; 432 433 return prio; 434 } 435 436 static int queue_prio(const struct intel_engine_execlists *execlists) 437 { 438 struct i915_priolist *p; 439 struct rb_node *rb; 440 441 rb = rb_first_cached(&execlists->queue); 442 if (!rb) 443 return INT_MIN; 444 445 /* 446 * As the priolist[] are inverted, with the highest priority in [0], 447 * we have to flip the index value to become priority. 448 */ 449 p = to_priolist(rb); 450 if (!I915_USER_PRIORITY_SHIFT) 451 return p->priority; 452 453 return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); 454 } 455 456 static inline bool need_preempt(const struct intel_engine_cs *engine, 457 const struct i915_request *rq, 458 struct rb_node *rb) 459 { 460 int last_prio; 461 462 if (!intel_engine_has_semaphores(engine)) 463 return false; 464 465 /* 466 * Check if the current priority hint merits a preemption attempt. 
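 * (The running request at the tail of ELSP[0] is weighed against three
 * candidates below: the first request of ELSP[1], the head of any ready
 * virtual engine, and the highest priority waiting in the queue.)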
467 * 468 * We record the highest value priority we saw during rescheduling 469 * prior to this dequeue, therefore we know that if it is strictly 470 * less than the current tail of ESLP[0], we do not need to force 471 * a preempt-to-idle cycle. 472 * 473 * However, the priority hint is a mere hint that we may need to 474 * preempt. If that hint is stale or we may be trying to preempt 475 * ourselves, ignore the request. 476 * 477 * More naturally we would write 478 * prio >= max(0, last); 479 * except that we wish to prevent triggering preemption at the same 480 * priority level: the task that is running should remain running 481 * to preserve FIFO ordering of dependencies. 482 */ 483 last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1); 484 if (engine->execlists.queue_priority_hint <= last_prio) 485 return false; 486 487 /* 488 * Check against the first request in ELSP[1], it will, thanks to the 489 * power of PI, be the highest priority of that context. 490 */ 491 if (!list_is_last(&rq->sched.link, &engine->active.requests) && 492 rq_prio(list_next_entry(rq, sched.link)) > last_prio) 493 return true; 494 495 if (rb) { 496 struct virtual_engine *ve = 497 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 498 bool preempt = false; 499 500 if (engine == ve->siblings[0]) { /* only preempt one sibling */ 501 struct i915_request *next; 502 503 rcu_read_lock(); 504 next = READ_ONCE(ve->request); 505 if (next) 506 preempt = rq_prio(next) > last_prio; 507 rcu_read_unlock(); 508 } 509 510 if (preempt) 511 return preempt; 512 } 513 514 /* 515 * If the inflight context did not trigger the preemption, then maybe 516 * it was the set of queued requests? Pick the highest priority in 517 * the queue (the first active priolist) and see if it deserves to be 518 * running instead of ELSP[0]. 519 * 520 * The highest priority request in the queue can not be either 521 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same 522 * context, it's priority would not exceed ELSP[0] aka last_prio. 523 */ 524 return queue_prio(&engine->execlists) > last_prio; 525 } 526 527 __maybe_unused static inline bool 528 assert_priority_queue(const struct i915_request *prev, 529 const struct i915_request *next) 530 { 531 /* 532 * Without preemption, the prev may refer to the still active element 533 * which we refuse to let go. 534 * 535 * Even with preemption, there are times when we think it is better not 536 * to preempt and leave an ostensibly lower priority request in flight. 537 */ 538 if (i915_request_is_active(prev)) 539 return true; 540 541 return rq_prio(prev) >= rq_prio(next); 542 } 543 544 /* 545 * The context descriptor encodes various attributes of a context, 546 * including its GTT address and some flags. Because it's fairly 547 * expensive to calculate, we'll just do it once and cache the result, 548 * which remains valid until the context is unpinned. 
549 * 550 * This is what a descriptor looks like, from LSB to MSB:: 551 * 552 * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template) 553 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 554 * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC) 555 * bits 53-54: mbz, reserved for use by hardware 556 * bits 55-63: group ID, currently unused and set to 0 557 * 558 * Starting from Gen11, the upper dword of the descriptor has a new format: 559 * 560 * bits 32-36: reserved 561 * bits 37-47: SW context ID 562 * bits 48:53: engine instance 563 * bit 54: mbz, reserved for use by hardware 564 * bits 55-60: SW counter 565 * bits 61-63: engine class 566 * 567 * engine info, SW context ID and SW counter need to form a unique number 568 * (Context ID) per lrc. 569 */ 570 static u32 571 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) 572 { 573 u32 desc; 574 575 desc = INTEL_LEGACY_32B_CONTEXT; 576 if (i915_vm_is_4lvl(ce->vm)) 577 desc = INTEL_LEGACY_64B_CONTEXT; 578 desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT; 579 580 desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; 581 if (IS_GEN(engine->i915, 8)) 582 desc |= GEN8_CTX_L3LLC_COHERENT; 583 584 return i915_ggtt_offset(ce->state) | desc; 585 } 586 587 static inline unsigned int dword_in_page(void *addr) 588 { 589 return offset_in_page(addr) / sizeof(u32); 590 } 591 592 static void set_offsets(u32 *regs, 593 const u8 *data, 594 const struct intel_engine_cs *engine, 595 bool clear) 596 #define NOP(x) (BIT(7) | (x)) 597 #define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6))) 598 #define POSTED BIT(0) 599 #define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200)) 600 #define REG16(x) \ 601 (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ 602 (((x) >> 2) & 0x7f) 603 #define END(total_state_size) 0, (total_state_size) 604 { 605 const u32 base = engine->mmio_base; 606 607 while (*data) { 608 u8 count, flags; 609 610 if (*data & BIT(7)) { /* skip */ 611 count = *data++ & ~BIT(7); 612 if (clear) 613 memset32(regs, MI_NOOP, count); 614 regs += count; 615 continue; 616 } 617 618 count = *data & 0x3f; 619 flags = *data >> 6; 620 data++; 621 622 *regs = MI_LOAD_REGISTER_IMM(count); 623 if (flags & POSTED) 624 *regs |= MI_LRI_FORCE_POSTED; 625 if (INTEL_GEN(engine->i915) >= 11) 626 *regs |= MI_LRI_LRM_CS_MMIO; 627 regs++; 628 629 GEM_BUG_ON(!count); 630 do { 631 u32 offset = 0; 632 u8 v; 633 634 do { 635 v = *data++; 636 offset <<= 7; 637 offset |= v & ~BIT(7); 638 } while (v & BIT(7)); 639 640 regs[0] = base + (offset << 2); 641 if (clear) 642 regs[1] = 0; 643 regs += 2; 644 } while (--count); 645 } 646 647 if (clear) { 648 u8 count = *++data; 649 650 /* Clear past the tail for HW access */ 651 GEM_BUG_ON(dword_in_page(regs) > count); 652 memset32(regs, MI_NOOP, count - dword_in_page(regs)); 653 654 /* Close the batch; used mainly by live_lrc_layout() */ 655 *regs = MI_BATCH_BUFFER_END; 656 if (INTEL_GEN(engine->i915) >= 10) 657 *regs |= BIT(0); 658 } 659 } 660 661 static const u8 gen8_xcs_offsets[] = { 662 NOP(1), 663 LRI(11, 0), 664 REG16(0x244), 665 REG(0x034), 666 REG(0x030), 667 REG(0x038), 668 REG(0x03c), 669 REG(0x168), 670 REG(0x140), 671 REG(0x110), 672 REG(0x11c), 673 REG(0x114), 674 REG(0x118), 675 676 NOP(9), 677 LRI(9, 0), 678 REG16(0x3a8), 679 REG16(0x28c), 680 REG16(0x288), 681 REG16(0x284), 682 REG16(0x280), 683 REG16(0x27c), 684 REG16(0x278), 685 REG16(0x274), 686 REG16(0x270), 687 688 NOP(13), 689 LRI(2, 0), 690 REG16(0x200), 691 REG(0x028), 692 693 END(80) 
694 }; 695 696 static const u8 gen9_xcs_offsets[] = { 697 NOP(1), 698 LRI(14, POSTED), 699 REG16(0x244), 700 REG(0x034), 701 REG(0x030), 702 REG(0x038), 703 REG(0x03c), 704 REG(0x168), 705 REG(0x140), 706 REG(0x110), 707 REG(0x11c), 708 REG(0x114), 709 REG(0x118), 710 REG(0x1c0), 711 REG(0x1c4), 712 REG(0x1c8), 713 714 NOP(3), 715 LRI(9, POSTED), 716 REG16(0x3a8), 717 REG16(0x28c), 718 REG16(0x288), 719 REG16(0x284), 720 REG16(0x280), 721 REG16(0x27c), 722 REG16(0x278), 723 REG16(0x274), 724 REG16(0x270), 725 726 NOP(13), 727 LRI(1, POSTED), 728 REG16(0x200), 729 730 NOP(13), 731 LRI(44, POSTED), 732 REG(0x028), 733 REG(0x09c), 734 REG(0x0c0), 735 REG(0x178), 736 REG(0x17c), 737 REG16(0x358), 738 REG(0x170), 739 REG(0x150), 740 REG(0x154), 741 REG(0x158), 742 REG16(0x41c), 743 REG16(0x600), 744 REG16(0x604), 745 REG16(0x608), 746 REG16(0x60c), 747 REG16(0x610), 748 REG16(0x614), 749 REG16(0x618), 750 REG16(0x61c), 751 REG16(0x620), 752 REG16(0x624), 753 REG16(0x628), 754 REG16(0x62c), 755 REG16(0x630), 756 REG16(0x634), 757 REG16(0x638), 758 REG16(0x63c), 759 REG16(0x640), 760 REG16(0x644), 761 REG16(0x648), 762 REG16(0x64c), 763 REG16(0x650), 764 REG16(0x654), 765 REG16(0x658), 766 REG16(0x65c), 767 REG16(0x660), 768 REG16(0x664), 769 REG16(0x668), 770 REG16(0x66c), 771 REG16(0x670), 772 REG16(0x674), 773 REG16(0x678), 774 REG16(0x67c), 775 REG(0x068), 776 777 END(176) 778 }; 779 780 static const u8 gen12_xcs_offsets[] = { 781 NOP(1), 782 LRI(13, POSTED), 783 REG16(0x244), 784 REG(0x034), 785 REG(0x030), 786 REG(0x038), 787 REG(0x03c), 788 REG(0x168), 789 REG(0x140), 790 REG(0x110), 791 REG(0x1c0), 792 REG(0x1c4), 793 REG(0x1c8), 794 REG(0x180), 795 REG16(0x2b4), 796 797 NOP(5), 798 LRI(9, POSTED), 799 REG16(0x3a8), 800 REG16(0x28c), 801 REG16(0x288), 802 REG16(0x284), 803 REG16(0x280), 804 REG16(0x27c), 805 REG16(0x278), 806 REG16(0x274), 807 REG16(0x270), 808 809 END(80) 810 }; 811 812 static const u8 gen8_rcs_offsets[] = { 813 NOP(1), 814 LRI(14, POSTED), 815 REG16(0x244), 816 REG(0x034), 817 REG(0x030), 818 REG(0x038), 819 REG(0x03c), 820 REG(0x168), 821 REG(0x140), 822 REG(0x110), 823 REG(0x11c), 824 REG(0x114), 825 REG(0x118), 826 REG(0x1c0), 827 REG(0x1c4), 828 REG(0x1c8), 829 830 NOP(3), 831 LRI(9, POSTED), 832 REG16(0x3a8), 833 REG16(0x28c), 834 REG16(0x288), 835 REG16(0x284), 836 REG16(0x280), 837 REG16(0x27c), 838 REG16(0x278), 839 REG16(0x274), 840 REG16(0x270), 841 842 NOP(13), 843 LRI(1, 0), 844 REG(0x0c8), 845 846 END(80) 847 }; 848 849 static const u8 gen9_rcs_offsets[] = { 850 NOP(1), 851 LRI(14, POSTED), 852 REG16(0x244), 853 REG(0x34), 854 REG(0x30), 855 REG(0x38), 856 REG(0x3c), 857 REG(0x168), 858 REG(0x140), 859 REG(0x110), 860 REG(0x11c), 861 REG(0x114), 862 REG(0x118), 863 REG(0x1c0), 864 REG(0x1c4), 865 REG(0x1c8), 866 867 NOP(3), 868 LRI(9, POSTED), 869 REG16(0x3a8), 870 REG16(0x28c), 871 REG16(0x288), 872 REG16(0x284), 873 REG16(0x280), 874 REG16(0x27c), 875 REG16(0x278), 876 REG16(0x274), 877 REG16(0x270), 878 879 NOP(13), 880 LRI(1, 0), 881 REG(0xc8), 882 883 NOP(13), 884 LRI(44, POSTED), 885 REG(0x28), 886 REG(0x9c), 887 REG(0xc0), 888 REG(0x178), 889 REG(0x17c), 890 REG16(0x358), 891 REG(0x170), 892 REG(0x150), 893 REG(0x154), 894 REG(0x158), 895 REG16(0x41c), 896 REG16(0x600), 897 REG16(0x604), 898 REG16(0x608), 899 REG16(0x60c), 900 REG16(0x610), 901 REG16(0x614), 902 REG16(0x618), 903 REG16(0x61c), 904 REG16(0x620), 905 REG16(0x624), 906 REG16(0x628), 907 REG16(0x62c), 908 REG16(0x630), 909 REG16(0x634), 910 REG16(0x638), 911 REG16(0x63c), 912 
REG16(0x640), 913 REG16(0x644), 914 REG16(0x648), 915 REG16(0x64c), 916 REG16(0x650), 917 REG16(0x654), 918 REG16(0x658), 919 REG16(0x65c), 920 REG16(0x660), 921 REG16(0x664), 922 REG16(0x668), 923 REG16(0x66c), 924 REG16(0x670), 925 REG16(0x674), 926 REG16(0x678), 927 REG16(0x67c), 928 REG(0x68), 929 930 END(176) 931 }; 932 933 static const u8 gen11_rcs_offsets[] = { 934 NOP(1), 935 LRI(15, POSTED), 936 REG16(0x244), 937 REG(0x034), 938 REG(0x030), 939 REG(0x038), 940 REG(0x03c), 941 REG(0x168), 942 REG(0x140), 943 REG(0x110), 944 REG(0x11c), 945 REG(0x114), 946 REG(0x118), 947 REG(0x1c0), 948 REG(0x1c4), 949 REG(0x1c8), 950 REG(0x180), 951 952 NOP(1), 953 LRI(9, POSTED), 954 REG16(0x3a8), 955 REG16(0x28c), 956 REG16(0x288), 957 REG16(0x284), 958 REG16(0x280), 959 REG16(0x27c), 960 REG16(0x278), 961 REG16(0x274), 962 REG16(0x270), 963 964 LRI(1, POSTED), 965 REG(0x1b0), 966 967 NOP(10), 968 LRI(1, 0), 969 REG(0x0c8), 970 971 END(80) 972 }; 973 974 static const u8 gen12_rcs_offsets[] = { 975 NOP(1), 976 LRI(13, POSTED), 977 REG16(0x244), 978 REG(0x034), 979 REG(0x030), 980 REG(0x038), 981 REG(0x03c), 982 REG(0x168), 983 REG(0x140), 984 REG(0x110), 985 REG(0x1c0), 986 REG(0x1c4), 987 REG(0x1c8), 988 REG(0x180), 989 REG16(0x2b4), 990 991 NOP(5), 992 LRI(9, POSTED), 993 REG16(0x3a8), 994 REG16(0x28c), 995 REG16(0x288), 996 REG16(0x284), 997 REG16(0x280), 998 REG16(0x27c), 999 REG16(0x278), 1000 REG16(0x274), 1001 REG16(0x270), 1002 1003 LRI(3, POSTED), 1004 REG(0x1b0), 1005 REG16(0x5a8), 1006 REG16(0x5ac), 1007 1008 NOP(6), 1009 LRI(1, 0), 1010 REG(0x0c8), 1011 NOP(3 + 9 + 1), 1012 1013 LRI(51, POSTED), 1014 REG16(0x588), 1015 REG16(0x588), 1016 REG16(0x588), 1017 REG16(0x588), 1018 REG16(0x588), 1019 REG16(0x588), 1020 REG(0x028), 1021 REG(0x09c), 1022 REG(0x0c0), 1023 REG(0x178), 1024 REG(0x17c), 1025 REG16(0x358), 1026 REG(0x170), 1027 REG(0x150), 1028 REG(0x154), 1029 REG(0x158), 1030 REG16(0x41c), 1031 REG16(0x600), 1032 REG16(0x604), 1033 REG16(0x608), 1034 REG16(0x60c), 1035 REG16(0x610), 1036 REG16(0x614), 1037 REG16(0x618), 1038 REG16(0x61c), 1039 REG16(0x620), 1040 REG16(0x624), 1041 REG16(0x628), 1042 REG16(0x62c), 1043 REG16(0x630), 1044 REG16(0x634), 1045 REG16(0x638), 1046 REG16(0x63c), 1047 REG16(0x640), 1048 REG16(0x644), 1049 REG16(0x648), 1050 REG16(0x64c), 1051 REG16(0x650), 1052 REG16(0x654), 1053 REG16(0x658), 1054 REG16(0x65c), 1055 REG16(0x660), 1056 REG16(0x664), 1057 REG16(0x668), 1058 REG16(0x66c), 1059 REG16(0x670), 1060 REG16(0x674), 1061 REG16(0x678), 1062 REG16(0x67c), 1063 REG(0x068), 1064 REG(0x084), 1065 NOP(1), 1066 1067 END(192) 1068 }; 1069 1070 #undef END 1071 #undef REG16 1072 #undef REG 1073 #undef LRI 1074 #undef NOP 1075 1076 static const u8 *reg_offsets(const struct intel_engine_cs *engine) 1077 { 1078 /* 1079 * The gen12+ lists only have the registers we program in the basic 1080 * default state. We rely on the context image using relative 1081 * addressing to automatic fixup the register state between the 1082 * physical engines for virtual engine. 
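 * A rough guide to reading the offset tables returned here (see the
 * NOP/LRI/REG/REG16/END macros around set_offsets() above): NOP(n)
 * skips n dwords, LRI(count, flags) opens an MI_LOAD_REGISTER_IMM(count)
 * block (POSTED adds MI_LRI_FORCE_POSTED), REG and REG16 encode register
 * offsets relative to engine->mmio_base, and END(x) gives the total
 * number of state dwords so the remainder is padded with MI_NOOP when
 * clearing.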
1083 */ 1084 GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 && 1085 !intel_engine_has_relative_mmio(engine)); 1086 1087 if (engine->class == RENDER_CLASS) { 1088 if (INTEL_GEN(engine->i915) >= 12) 1089 return gen12_rcs_offsets; 1090 else if (INTEL_GEN(engine->i915) >= 11) 1091 return gen11_rcs_offsets; 1092 else if (INTEL_GEN(engine->i915) >= 9) 1093 return gen9_rcs_offsets; 1094 else 1095 return gen8_rcs_offsets; 1096 } else { 1097 if (INTEL_GEN(engine->i915) >= 12) 1098 return gen12_xcs_offsets; 1099 else if (INTEL_GEN(engine->i915) >= 9) 1100 return gen9_xcs_offsets; 1101 else 1102 return gen8_xcs_offsets; 1103 } 1104 } 1105 1106 static struct i915_request * 1107 __unwind_incomplete_requests(struct intel_engine_cs *engine) 1108 { 1109 struct i915_request *rq, *rn, *active = NULL; 1110 struct list_head *pl; 1111 int prio = I915_PRIORITY_INVALID; 1112 1113 lockdep_assert_held(&engine->active.lock); 1114 1115 list_for_each_entry_safe_reverse(rq, rn, 1116 &engine->active.requests, 1117 sched.link) { 1118 if (i915_request_completed(rq)) 1119 continue; /* XXX */ 1120 1121 __i915_request_unsubmit(rq); 1122 1123 /* 1124 * Push the request back into the queue for later resubmission. 1125 * If this request is not native to this physical engine (i.e. 1126 * it came from a virtual source), push it back onto the virtual 1127 * engine so that it can be moved across onto another physical 1128 * engine as load dictates. 1129 */ 1130 if (likely(rq->execution_mask == engine->mask)) { 1131 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); 1132 if (rq_prio(rq) != prio) { 1133 prio = rq_prio(rq); 1134 pl = i915_sched_lookup_priolist(engine, prio); 1135 } 1136 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 1137 1138 list_move(&rq->sched.link, pl); 1139 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 1140 1141 /* Check in case we rollback so far we wrap [size/2] */ 1142 if (intel_ring_direction(rq->ring, 1143 intel_ring_wrap(rq->ring, 1144 rq->tail), 1145 rq->ring->tail) > 0) 1146 rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE; 1147 1148 active = rq; 1149 } else { 1150 struct intel_engine_cs *owner = rq->context->engine; 1151 1152 WRITE_ONCE(rq->engine, owner); 1153 owner->submit_request(rq); 1154 active = NULL; 1155 } 1156 } 1157 1158 return active; 1159 } 1160 1161 struct i915_request * 1162 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) 1163 { 1164 struct intel_engine_cs *engine = 1165 container_of(execlists, typeof(*engine), execlists); 1166 1167 return __unwind_incomplete_requests(engine); 1168 } 1169 1170 static inline void 1171 execlists_context_status_change(struct i915_request *rq, unsigned long status) 1172 { 1173 /* 1174 * Only used when GVT-g is enabled now. When GVT-g is disabled, 1175 * The compiler should eliminate this function as dead-code. 
1176 */ 1177 if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) 1178 return; 1179 1180 atomic_notifier_call_chain(&rq->engine->context_status_notifier, 1181 status, rq); 1182 } 1183 1184 static void intel_engine_context_in(struct intel_engine_cs *engine) 1185 { 1186 unsigned long flags; 1187 1188 if (atomic_add_unless(&engine->stats.active, 1, 0)) 1189 return; 1190 1191 write_seqlock_irqsave(&engine->stats.lock, flags); 1192 if (!atomic_add_unless(&engine->stats.active, 1, 0)) { 1193 engine->stats.start = ktime_get(); 1194 atomic_inc(&engine->stats.active); 1195 } 1196 write_sequnlock_irqrestore(&engine->stats.lock, flags); 1197 } 1198 1199 static void intel_engine_context_out(struct intel_engine_cs *engine) 1200 { 1201 unsigned long flags; 1202 1203 GEM_BUG_ON(!atomic_read(&engine->stats.active)); 1204 1205 if (atomic_add_unless(&engine->stats.active, -1, 1)) 1206 return; 1207 1208 write_seqlock_irqsave(&engine->stats.lock, flags); 1209 if (atomic_dec_and_test(&engine->stats.active)) { 1210 engine->stats.total = 1211 ktime_add(engine->stats.total, 1212 ktime_sub(ktime_get(), engine->stats.start)); 1213 } 1214 write_sequnlock_irqrestore(&engine->stats.lock, flags); 1215 } 1216 1217 static void 1218 execlists_check_context(const struct intel_context *ce, 1219 const struct intel_engine_cs *engine) 1220 { 1221 const struct intel_ring *ring = ce->ring; 1222 u32 *regs = ce->lrc_reg_state; 1223 bool valid = true; 1224 int x; 1225 1226 if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) { 1227 pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n", 1228 engine->name, 1229 regs[CTX_RING_START], 1230 i915_ggtt_offset(ring->vma)); 1231 regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); 1232 valid = false; 1233 } 1234 1235 if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) != 1236 (RING_CTL_SIZE(ring->size) | RING_VALID)) { 1237 pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n", 1238 engine->name, 1239 regs[CTX_RING_CTL], 1240 (u32)(RING_CTL_SIZE(ring->size) | RING_VALID)); 1241 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; 1242 valid = false; 1243 } 1244 1245 x = lrc_ring_mi_mode(engine); 1246 if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) { 1247 pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n", 1248 engine->name, regs[x + 1]); 1249 regs[x + 1] &= ~STOP_RING; 1250 regs[x + 1] |= STOP_RING << 16; 1251 valid = false; 1252 } 1253 1254 WARN_ONCE(!valid, "Invalid lrc state found before submission\n"); 1255 } 1256 1257 static void restore_default_state(struct intel_context *ce, 1258 struct intel_engine_cs *engine) 1259 { 1260 u32 *regs; 1261 1262 regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE); 1263 execlists_init_reg_state(regs, ce, engine, ce->ring, true); 1264 1265 ce->runtime.last = intel_context_get_runtime(ce); 1266 } 1267 1268 static void reset_active(struct i915_request *rq, 1269 struct intel_engine_cs *engine) 1270 { 1271 struct intel_context * const ce = rq->context; 1272 u32 head; 1273 1274 /* 1275 * The executing context has been cancelled. We want to prevent 1276 * further execution along this context and propagate the error on 1277 * to anything depending on its results. 1278 * 1279 * In __i915_request_submit(), we apply the -EIO and remove the 1280 * requests' payloads for any banned requests. But first, we must 1281 * rewind the context back to the start of the incomplete request so 1282 * that we do not jump back into the middle of the batch. 
1283 * 1284 * We preserve the breadcrumbs and semaphores of the incomplete 1285 * requests so that inter-timeline dependencies (i.e other timelines) 1286 * remain correctly ordered. And we defer to __i915_request_submit() 1287 * so that all asynchronous waits are correctly handled. 1288 */ 1289 ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n", 1290 rq->fence.context, rq->fence.seqno); 1291 1292 /* On resubmission of the active request, payload will be scrubbed */ 1293 if (i915_request_completed(rq)) 1294 head = rq->tail; 1295 else 1296 head = active_request(ce->timeline, rq)->head; 1297 head = intel_ring_wrap(ce->ring, head); 1298 1299 /* Scrub the context image to prevent replaying the previous batch */ 1300 restore_default_state(ce, engine); 1301 __execlists_update_reg_state(ce, engine, head); 1302 1303 /* We've switched away, so this should be a no-op, but intent matters */ 1304 ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; 1305 } 1306 1307 static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) 1308 { 1309 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1310 ce->runtime.num_underflow += dt < 0; 1311 ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt); 1312 #endif 1313 } 1314 1315 static void intel_context_update_runtime(struct intel_context *ce) 1316 { 1317 u32 old; 1318 s32 dt; 1319 1320 if (intel_context_is_barrier(ce)) 1321 return; 1322 1323 old = ce->runtime.last; 1324 ce->runtime.last = intel_context_get_runtime(ce); 1325 dt = ce->runtime.last - old; 1326 1327 if (unlikely(dt <= 0)) { 1328 CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n", 1329 old, ce->runtime.last, dt); 1330 st_update_runtime_underflow(ce, dt); 1331 return; 1332 } 1333 1334 ewma_runtime_add(&ce->runtime.avg, dt); 1335 ce->runtime.total += dt; 1336 } 1337 1338 static inline struct intel_engine_cs * 1339 __execlists_schedule_in(struct i915_request *rq) 1340 { 1341 struct intel_engine_cs * const engine = rq->engine; 1342 struct intel_context * const ce = rq->context; 1343 1344 intel_context_get(ce); 1345 1346 if (unlikely(intel_context_is_banned(ce))) 1347 reset_active(rq, engine); 1348 1349 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 1350 execlists_check_context(ce, engine); 1351 1352 if (ce->tag) { 1353 /* Use a fixed tag for OA and friends */ 1354 GEM_BUG_ON(ce->tag <= BITS_PER_LONG); 1355 ce->lrc.ccid = ce->tag; 1356 } else { 1357 /* We don't need a strict matching tag, just different values */ 1358 unsigned int tag = ffs(READ_ONCE(engine->context_tag)); 1359 1360 GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG); 1361 clear_bit(tag - 1, &engine->context_tag); 1362 ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32); 1363 1364 BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); 1365 } 1366 1367 ce->lrc.ccid |= engine->execlists.ccid; 1368 1369 __intel_gt_pm_get(engine->gt); 1370 if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active)) 1371 intel_uncore_forcewake_get(engine->uncore, engine->fw_domain); 1372 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); 1373 intel_engine_context_in(engine); 1374 1375 return engine; 1376 } 1377 1378 static inline struct i915_request * 1379 execlists_schedule_in(struct i915_request *rq, int idx) 1380 { 1381 struct intel_context * const ce = rq->context; 1382 struct intel_engine_cs *old; 1383 1384 GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); 1385 trace_i915_request_in(rq, idx); 1386 1387 old = READ_ONCE(ce->inflight); 1388 do { 1389 if (!old) { 1390 WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq)); 1391 break; 1392 } 1393 
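/*
 * Otherwise another request of this context is already inflight on
 * this engine: bump the count packed into the low bits of ce->inflight
 * (it is dropped again via ptr_unmask_bits()/ptr_dec() in
 * execlists_schedule_out()).
 */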
} while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old))); 1394 1395 GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); 1396 return i915_request_get(rq); 1397 } 1398 1399 static void kick_siblings(struct i915_request *rq, struct intel_context *ce) 1400 { 1401 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 1402 struct i915_request *next = READ_ONCE(ve->request); 1403 1404 if (next == rq || (next && next->execution_mask & ~rq->execution_mask)) 1405 tasklet_hi_schedule(&ve->base.execlists.tasklet); 1406 } 1407 1408 static inline void 1409 __execlists_schedule_out(struct i915_request *rq, 1410 struct intel_engine_cs * const engine, 1411 unsigned int ccid) 1412 { 1413 struct intel_context * const ce = rq->context; 1414 1415 /* 1416 * NB process_csb() is not under the engine->active.lock and hence 1417 * schedule_out can race with schedule_in meaning that we should 1418 * refrain from doing non-trivial work here. 1419 */ 1420 1421 /* 1422 * If we have just completed this context, the engine may now be 1423 * idle and we want to re-enter powersaving. 1424 */ 1425 if (list_is_last_rcu(&rq->link, &ce->timeline->requests) && 1426 i915_request_completed(rq)) 1427 intel_engine_add_retire(engine, ce->timeline); 1428 1429 ccid >>= GEN11_SW_CTX_ID_SHIFT - 32; 1430 ccid &= GEN12_MAX_CONTEXT_HW_ID; 1431 if (ccid < BITS_PER_LONG) { 1432 GEM_BUG_ON(ccid == 0); 1433 GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); 1434 set_bit(ccid - 1, &engine->context_tag); 1435 } 1436 1437 intel_context_update_runtime(ce); 1438 intel_engine_context_out(engine); 1439 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); 1440 if (engine->fw_domain && !atomic_dec_return(&engine->fw_active)) 1441 intel_uncore_forcewake_put(engine->uncore, engine->fw_domain); 1442 intel_gt_pm_put_async(engine->gt); 1443 1444 /* 1445 * If this is part of a virtual engine, its next request may 1446 * have been blocked waiting for access to the active context. 1447 * We have to kick all the siblings again in case we need to 1448 * switch (e.g. the next request is not runnable on this 1449 * engine). Hopefully, we will already have submitted the next 1450 * request before the tasklet runs and do not need to rebuild 1451 * each virtual tree and kick everyone again. 1452 */ 1453 if (ce->engine != engine) 1454 kick_siblings(rq, ce); 1455 1456 intel_context_put(ce); 1457 } 1458 1459 static inline void 1460 execlists_schedule_out(struct i915_request *rq) 1461 { 1462 struct intel_context * const ce = rq->context; 1463 struct intel_engine_cs *cur, *old; 1464 u32 ccid; 1465 1466 trace_i915_request_out(rq); 1467 1468 ccid = rq->context->lrc.ccid; 1469 old = READ_ONCE(ce->inflight); 1470 do 1471 cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL; 1472 while (!try_cmpxchg(&ce->inflight, &old, cur)); 1473 if (!cur) 1474 __execlists_schedule_out(rq, old, ccid); 1475 1476 i915_request_put(rq); 1477 } 1478 1479 static u64 execlists_update_context(struct i915_request *rq) 1480 { 1481 struct intel_context *ce = rq->context; 1482 u64 desc = ce->lrc.desc; 1483 u32 tail, prev; 1484 1485 /* 1486 * WaIdleLiteRestore:bdw,skl 1487 * 1488 * We should never submit the context with the same RING_TAIL twice 1489 * just in case we submit an empty ring, which confuses the HW. 1490 * 1491 * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of 1492 * the normal request to be able to always advance the RING_TAIL on 1493 * subsequent resubmissions (for lite restore). 
Should that fail us, 1494 * and we try and submit the same tail again, force the context 1495 * reload. 1496 * 1497 * If we need to return to a preempted context, we need to skip the 1498 * lite-restore and force it to reload the RING_TAIL. Otherwise, the 1499 * HW has a tendency to ignore us rewinding the TAIL to the end of 1500 * an earlier request. 1501 */ 1502 GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail); 1503 prev = rq->ring->tail; 1504 tail = intel_ring_set_tail(rq->ring, rq->tail); 1505 if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) 1506 desc |= CTX_DESC_FORCE_RESTORE; 1507 ce->lrc_reg_state[CTX_RING_TAIL] = tail; 1508 rq->tail = rq->wa_tail; 1509 1510 /* 1511 * Make sure the context image is complete before we submit it to HW. 1512 * 1513 * Ostensibly, writes (including the WCB) should be flushed prior to 1514 * an uncached write such as our mmio register access, the empirical 1515 * evidence (esp. on Braswell) suggests that the WC write into memory 1516 * may not be visible to the HW prior to the completion of the UC 1517 * register write and that we may begin execution from the context 1518 * before its image is complete leading to invalid PD chasing. 1519 */ 1520 wmb(); 1521 1522 ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE; 1523 return desc; 1524 } 1525 1526 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) 1527 { 1528 if (execlists->ctrl_reg) { 1529 writel(lower_32_bits(desc), execlists->submit_reg + port * 2); 1530 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); 1531 } else { 1532 writel(upper_32_bits(desc), execlists->submit_reg); 1533 writel(lower_32_bits(desc), execlists->submit_reg); 1534 } 1535 } 1536 1537 static __maybe_unused char * 1538 dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) 1539 { 1540 if (!rq) 1541 return ""; 1542 1543 snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d", 1544 prefix, 1545 rq->context->lrc.ccid, 1546 rq->fence.context, rq->fence.seqno, 1547 i915_request_completed(rq) ? "!" : 1548 i915_request_started(rq) ? 
"*" : 1549 "", 1550 rq_prio(rq)); 1551 1552 return buf; 1553 } 1554 1555 static __maybe_unused void 1556 trace_ports(const struct intel_engine_execlists *execlists, 1557 const char *msg, 1558 struct i915_request * const *ports) 1559 { 1560 const struct intel_engine_cs *engine = 1561 container_of(execlists, typeof(*engine), execlists); 1562 char __maybe_unused p0[40], p1[40]; 1563 1564 if (!ports[0]) 1565 return; 1566 1567 ENGINE_TRACE(engine, "%s { %s%s }\n", msg, 1568 dump_port(p0, sizeof(p0), "", ports[0]), 1569 dump_port(p1, sizeof(p1), ", ", ports[1])); 1570 } 1571 1572 static inline bool 1573 reset_in_progress(const struct intel_engine_execlists *execlists) 1574 { 1575 return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); 1576 } 1577 1578 static __maybe_unused bool 1579 assert_pending_valid(const struct intel_engine_execlists *execlists, 1580 const char *msg) 1581 { 1582 struct intel_engine_cs *engine = 1583 container_of(execlists, typeof(*engine), execlists); 1584 struct i915_request * const *port, *rq; 1585 struct intel_context *ce = NULL; 1586 bool sentinel = false; 1587 u32 ccid = -1; 1588 1589 trace_ports(execlists, msg, execlists->pending); 1590 1591 /* We may be messing around with the lists during reset, lalala */ 1592 if (reset_in_progress(execlists)) 1593 return true; 1594 1595 if (!execlists->pending[0]) { 1596 GEM_TRACE_ERR("%s: Nothing pending for promotion!\n", 1597 engine->name); 1598 return false; 1599 } 1600 1601 if (execlists->pending[execlists_num_ports(execlists)]) { 1602 GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n", 1603 engine->name, execlists_num_ports(execlists)); 1604 return false; 1605 } 1606 1607 for (port = execlists->pending; (rq = *port); port++) { 1608 unsigned long flags; 1609 bool ok = true; 1610 1611 GEM_BUG_ON(!kref_read(&rq->fence.refcount)); 1612 GEM_BUG_ON(!i915_request_is_active(rq)); 1613 1614 if (ce == rq->context) { 1615 GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n", 1616 engine->name, 1617 ce->timeline->fence_context, 1618 port - execlists->pending); 1619 return false; 1620 } 1621 ce = rq->context; 1622 1623 if (ccid == ce->lrc.ccid) { 1624 GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n", 1625 engine->name, 1626 ccid, ce->timeline->fence_context, 1627 port - execlists->pending); 1628 return false; 1629 } 1630 ccid = ce->lrc.ccid; 1631 1632 /* 1633 * Sentinels are supposed to be the last request so they flush 1634 * the current execution off the HW. Check that they are the only 1635 * request in the pending submission. 1636 */ 1637 if (sentinel) { 1638 GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n", 1639 engine->name, 1640 ce->timeline->fence_context, 1641 port - execlists->pending); 1642 return false; 1643 } 1644 sentinel = i915_request_has_sentinel(rq); 1645 1646 /* Hold tightly onto the lock to prevent concurrent retires! 
*/ 1647 if (!spin_trylock_irqsave(&rq->lock, flags)) 1648 continue; 1649 1650 if (i915_request_completed(rq)) 1651 goto unlock; 1652 1653 if (i915_active_is_idle(&ce->active) && 1654 !intel_context_is_barrier(ce)) { 1655 GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n", 1656 engine->name, 1657 ce->timeline->fence_context, 1658 port - execlists->pending); 1659 ok = false; 1660 goto unlock; 1661 } 1662 1663 if (!i915_vma_is_pinned(ce->state)) { 1664 GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n", 1665 engine->name, 1666 ce->timeline->fence_context, 1667 port - execlists->pending); 1668 ok = false; 1669 goto unlock; 1670 } 1671 1672 if (!i915_vma_is_pinned(ce->ring->vma)) { 1673 GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n", 1674 engine->name, 1675 ce->timeline->fence_context, 1676 port - execlists->pending); 1677 ok = false; 1678 goto unlock; 1679 } 1680 1681 unlock: 1682 spin_unlock_irqrestore(&rq->lock, flags); 1683 if (!ok) 1684 return false; 1685 } 1686 1687 return ce; 1688 } 1689 1690 static void execlists_submit_ports(struct intel_engine_cs *engine) 1691 { 1692 struct intel_engine_execlists *execlists = &engine->execlists; 1693 unsigned int n; 1694 1695 GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); 1696 1697 /* 1698 * We can skip acquiring intel_runtime_pm_get() here as it was taken 1699 * on our behalf by the request (see i915_gem_mark_busy()) and it will 1700 * not be relinquished until the device is idle (see 1701 * i915_gem_idle_work_handler()). As a precaution, we make sure 1702 * that all ELSP are drained i.e. we have processed the CSB, 1703 * before allowing ourselves to idle and calling intel_runtime_pm_put(). 1704 */ 1705 GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); 1706 1707 /* 1708 * ELSQ note: the submit queue is not cleared after being submitted 1709 * to the HW so we need to make sure we always clean it up. This is 1710 * currently ensured by the fact that we always write the same number 1711 * of elsq entries, keep this in mind before changing the loop below. 1712 */ 1713 for (n = execlists_num_ports(execlists); n--; ) { 1714 struct i915_request *rq = execlists->pending[n]; 1715 1716 write_desc(execlists, 1717 rq ? execlists_update_context(rq) : 0, 1718 n); 1719 } 1720 1721 /* we need to manually load the submit queue */ 1722 if (execlists->ctrl_reg) 1723 writel(EL_CTRL_LOAD, execlists->ctrl_reg); 1724 } 1725 1726 static bool ctx_single_port_submission(const struct intel_context *ce) 1727 { 1728 return (IS_ENABLED(CONFIG_DRM_I915_GVT) && 1729 intel_context_force_single_submission(ce)); 1730 } 1731 1732 static bool can_merge_ctx(const struct intel_context *prev, 1733 const struct intel_context *next) 1734 { 1735 if (prev != next) 1736 return false; 1737 1738 if (ctx_single_port_submission(prev)) 1739 return false; 1740 1741 return true; 1742 } 1743 1744 static unsigned long i915_request_flags(const struct i915_request *rq) 1745 { 1746 return READ_ONCE(rq->fence.flags); 1747 } 1748 1749 static bool can_merge_rq(const struct i915_request *prev, 1750 const struct i915_request *next) 1751 { 1752 GEM_BUG_ON(prev == next); 1753 GEM_BUG_ON(!assert_priority_queue(prev, next)); 1754 1755 /* 1756 * We do not submit known completed requests. Therefore if the next 1757 * request is already completed, we can pretend to merge it in 1758 * with the previous context (and we will skip updating the ELSP 1759 * and tracking). 
Thus hopefully keeping the ELSP full with active 1760 * contexts, despite the best efforts of preempt-to-busy to confuse 1761 * us. 1762 */ 1763 if (i915_request_completed(next)) 1764 return true; 1765 1766 if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) & 1767 (BIT(I915_FENCE_FLAG_NOPREEMPT) | 1768 BIT(I915_FENCE_FLAG_SENTINEL)))) 1769 return false; 1770 1771 if (!can_merge_ctx(prev->context, next->context)) 1772 return false; 1773 1774 GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno)); 1775 return true; 1776 } 1777 1778 static void virtual_update_register_offsets(u32 *regs, 1779 struct intel_engine_cs *engine) 1780 { 1781 set_offsets(regs, reg_offsets(engine), engine, false); 1782 } 1783 1784 static bool virtual_matches(const struct virtual_engine *ve, 1785 const struct i915_request *rq, 1786 const struct intel_engine_cs *engine) 1787 { 1788 const struct intel_engine_cs *inflight; 1789 1790 if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ 1791 return false; 1792 1793 /* 1794 * We track when the HW has completed saving the context image 1795 * (i.e. when we have seen the final CS event switching out of 1796 * the context) and must not overwrite the context image before 1797 * then. This restricts us to only using the active engine 1798 * while the previous virtualized request is inflight (so 1799 * we reuse the register offsets). This is a very small 1800 * hysteresis on the greedy selection algorithm. 1801 */ 1802 inflight = intel_context_inflight(&ve->context); 1803 if (inflight && inflight != engine) 1804 return false; 1805 1806 return true; 1807 } 1808 1809 static void virtual_xfer_context(struct virtual_engine *ve, 1810 struct intel_engine_cs *engine) 1811 { 1812 unsigned int n; 1813 1814 if (likely(engine == ve->siblings[0])) 1815 return; 1816 1817 GEM_BUG_ON(READ_ONCE(ve->context.inflight)); 1818 if (!intel_engine_has_relative_mmio(engine)) 1819 virtual_update_register_offsets(ve->context.lrc_reg_state, 1820 engine); 1821 1822 /* 1823 * Move the bound engine to the top of the list for 1824 * future execution. We then kick this tasklet first 1825 * before checking others, so that we preferentially 1826 * reuse this set of bound registers. 1827 */ 1828 for (n = 1; n < ve->num_siblings; n++) { 1829 if (ve->siblings[n] == engine) { 1830 swap(ve->siblings[n], ve->siblings[0]); 1831 break; 1832 } 1833 } 1834 } 1835 1836 #define for_each_waiter(p__, rq__) \ 1837 list_for_each_entry_lockless(p__, \ 1838 &(rq__)->sched.waiters_list, \ 1839 wait_link) 1840 1841 #define for_each_signaler(p__, rq__) \ 1842 list_for_each_entry_rcu(p__, \ 1843 &(rq__)->sched.signalers_list, \ 1844 signal_link) 1845 1846 static void defer_request(struct i915_request *rq, struct list_head * const pl) 1847 { 1848 LIST_HEAD(list); 1849 1850 /* 1851 * We want to move the interrupted request to the back of 1852 * the round-robin list (i.e. its priority level), but 1853 * in doing so, we must then move all requests that were in 1854 * flight and were waiting for the interrupted request to 1855 * be run after it again.
1856 */ 1857 do { 1858 struct i915_dependency *p; 1859 1860 GEM_BUG_ON(i915_request_is_active(rq)); 1861 list_move_tail(&rq->sched.link, pl); 1862 1863 for_each_waiter(p, rq) { 1864 struct i915_request *w = 1865 container_of(p->waiter, typeof(*w), sched); 1866 1867 if (p->flags & I915_DEPENDENCY_WEAK) 1868 continue; 1869 1870 /* Leave semaphores spinning on the other engines */ 1871 if (w->engine != rq->engine) 1872 continue; 1873 1874 /* No waiter should start before its signaler */ 1875 GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) && 1876 i915_request_started(w) && 1877 !i915_request_completed(rq)); 1878 1879 GEM_BUG_ON(i915_request_is_active(w)); 1880 if (!i915_request_is_ready(w)) 1881 continue; 1882 1883 if (rq_prio(w) < rq_prio(rq)) 1884 continue; 1885 1886 GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); 1887 list_move_tail(&w->sched.link, &list); 1888 } 1889 1890 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); 1891 } while (rq); 1892 } 1893 1894 static void defer_active(struct intel_engine_cs *engine) 1895 { 1896 struct i915_request *rq; 1897 1898 rq = __unwind_incomplete_requests(engine); 1899 if (!rq) 1900 return; 1901 1902 defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); 1903 } 1904 1905 static bool 1906 need_timeslice(const struct intel_engine_cs *engine, 1907 const struct i915_request *rq, 1908 const struct rb_node *rb) 1909 { 1910 int hint; 1911 1912 if (!intel_engine_has_timeslices(engine)) 1913 return false; 1914 1915 hint = engine->execlists.queue_priority_hint; 1916 1917 if (rb) { 1918 const struct virtual_engine *ve = 1919 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 1920 const struct intel_engine_cs *inflight = 1921 intel_context_inflight(&ve->context); 1922 1923 if (!inflight || inflight == engine) { 1924 struct i915_request *next; 1925 1926 rcu_read_lock(); 1927 next = READ_ONCE(ve->request); 1928 if (next) 1929 hint = max(hint, rq_prio(next)); 1930 rcu_read_unlock(); 1931 } 1932 } 1933 1934 if (!list_is_last(&rq->sched.link, &engine->active.requests)) 1935 hint = max(hint, rq_prio(list_next_entry(rq, sched.link))); 1936 1937 GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE); 1938 return hint >= effective_prio(rq); 1939 } 1940 1941 static bool 1942 timeslice_yield(const struct intel_engine_execlists *el, 1943 const struct i915_request *rq) 1944 { 1945 /* 1946 * Once bitten, forever smitten! 1947 * 1948 * If the active context ever busy-waited on a semaphore, 1949 * it will be treated as a hog until the end of its timeslice (i.e. 1950 * until it is scheduled out and replaced by a new submission, 1951 * possibly even its own lite-restore). The HW only sends an interrupt 1952 * on the first miss, and we do not know if that semaphore has been 1953 * signaled, or even if it is now stuck on another semaphore. Play 1954 * safe, yield if it might be stuck -- it will be given a fresh 1955 * timeslice in the near future.
1956 */ 1957 return rq->context->lrc.ccid == READ_ONCE(el->yield); 1958 } 1959 1960 static bool 1961 timeslice_expired(const struct intel_engine_execlists *el, 1962 const struct i915_request *rq) 1963 { 1964 return timer_expired(&el->timer) || timeslice_yield(el, rq); 1965 } 1966 1967 static int 1968 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) 1969 { 1970 if (list_is_last(&rq->sched.link, &engine->active.requests)) 1971 return engine->execlists.queue_priority_hint; 1972 1973 return rq_prio(list_next_entry(rq, sched.link)); 1974 } 1975 1976 static inline unsigned long 1977 timeslice(const struct intel_engine_cs *engine) 1978 { 1979 return READ_ONCE(engine->props.timeslice_duration_ms); 1980 } 1981 1982 static unsigned long active_timeslice(const struct intel_engine_cs *engine) 1983 { 1984 const struct intel_engine_execlists *execlists = &engine->execlists; 1985 const struct i915_request *rq = *execlists->active; 1986 1987 if (!rq || i915_request_completed(rq)) 1988 return 0; 1989 1990 if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq)) 1991 return 0; 1992 1993 return timeslice(engine); 1994 } 1995 1996 static void set_timeslice(struct intel_engine_cs *engine) 1997 { 1998 unsigned long duration; 1999 2000 if (!intel_engine_has_timeslices(engine)) 2001 return; 2002 2003 duration = active_timeslice(engine); 2004 ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration); 2005 2006 set_timer_ms(&engine->execlists.timer, duration); 2007 } 2008 2009 static void start_timeslice(struct intel_engine_cs *engine, int prio) 2010 { 2011 struct intel_engine_execlists *execlists = &engine->execlists; 2012 unsigned long duration; 2013 2014 if (!intel_engine_has_timeslices(engine)) 2015 return; 2016 2017 WRITE_ONCE(execlists->switch_priority_hint, prio); 2018 if (prio == INT_MIN) 2019 return; 2020 2021 if (timer_pending(&execlists->timer)) 2022 return; 2023 2024 duration = timeslice(engine); 2025 ENGINE_TRACE(engine, 2026 "start timeslicing, prio:%d, interval:%lu", 2027 prio, duration); 2028 2029 set_timer_ms(&execlists->timer, duration); 2030 } 2031 2032 static void record_preemption(struct intel_engine_execlists *execlists) 2033 { 2034 (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); 2035 } 2036 2037 static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, 2038 const struct i915_request *rq) 2039 { 2040 if (!rq) 2041 return 0; 2042 2043 /* Force a fast reset for terminated contexts (ignoring sysfs!) */ 2044 if (unlikely(intel_context_is_banned(rq->context))) 2045 return 1; 2046 2047 return READ_ONCE(engine->props.preempt_timeout_ms); 2048 } 2049 2050 static void set_preempt_timeout(struct intel_engine_cs *engine, 2051 const struct i915_request *rq) 2052 { 2053 if (!intel_engine_has_preempt_reset(engine)) 2054 return; 2055 2056 set_timer_ms(&engine->execlists.preempt, 2057 active_preempt_timeout(engine, rq)); 2058 } 2059 2060 static inline void clear_ports(struct i915_request **ports, int count) 2061 { 2062 memset_p((void **)ports, NULL, count); 2063 } 2064 2065 static inline void 2066 copy_ports(struct i915_request **dst, struct i915_request **src, int count) 2067 { 2068 /* A memcpy_p() would be very useful here! 
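* Until we have one, copy the ports one pointer at a time so that each store is a single aligned write: readers walking the old array under the execlists_active() seqlock must never observe a torn pointer.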
*/ 2069 while (count--) 2070 WRITE_ONCE(*dst++, *src++); /* avoid write tearing */ 2071 } 2072 2073 static void execlists_dequeue(struct intel_engine_cs *engine) 2074 { 2075 struct intel_engine_execlists * const execlists = &engine->execlists; 2076 struct i915_request **port = execlists->pending; 2077 struct i915_request ** const last_port = port + execlists->port_mask; 2078 struct i915_request * const *active; 2079 struct i915_request *last; 2080 struct rb_node *rb; 2081 bool submit = false; 2082 2083 /* 2084 * Hardware submission is through 2 ports. Conceptually each port 2085 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 2086 * static for a context, and unique to each, so we only execute 2087 * requests belonging to a single context from each ring. RING_HEAD 2088 * is maintained by the CS in the context image, it marks the place 2089 * where it got up to last time, and through RING_TAIL we tell the CS 2090 * where we want to execute up to this time. 2091 * 2092 * In this list the requests are in order of execution. Consecutive 2093 * requests from the same context are adjacent in the ringbuffer. We 2094 * can combine these requests into a single RING_TAIL update: 2095 * 2096 * RING_HEAD...req1...req2 2097 * ^- RING_TAIL 2098 * since to execute req2 the CS must first execute req1. 2099 * 2100 * Our goal then is to point each port to the end of a consecutive 2101 * sequence of requests as being the most optimal (fewest wake ups 2102 * and context switches) submission. 2103 */ 2104 2105 for (rb = rb_first_cached(&execlists->virtual); rb; ) { 2106 struct virtual_engine *ve = 2107 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 2108 struct i915_request *rq = READ_ONCE(ve->request); 2109 2110 if (!rq) { /* lazily cleanup after another engine handled rq */ 2111 rb_erase_cached(rb, &execlists->virtual); 2112 RB_CLEAR_NODE(rb); 2113 rb = rb_first_cached(&execlists->virtual); 2114 continue; 2115 } 2116 2117 if (!virtual_matches(ve, rq, engine)) { 2118 rb = rb_next(rb); 2119 continue; 2120 } 2121 2122 break; 2123 } 2124 2125 /* 2126 * If the queue is higher priority than the last 2127 * request in the currently active context, submit afresh. 2128 * We will resubmit again afterwards in case we need to split 2129 * the active context to interject the preemption request, 2130 * i.e. we will retrigger preemption following the ack in case 2131 * of trouble. 2132 */ 2133 active = READ_ONCE(execlists->active); 2134 2135 /* 2136 * In theory we can skip over completed contexts that have not 2137 * yet been processed by events (as those events are in flight): 2138 * 2139 * while ((last = *active) && i915_request_completed(last)) 2140 * active++; 2141 * 2142 * However, the GPU cannot handle this as it will ultimately 2143 * find itself trying to jump back into a context it has just 2144 * completed and barf. 2145 */ 2146 2147 if ((last = *active)) { 2148 if (need_preempt(engine, last, rb)) { 2149 if (i915_request_completed(last)) { 2150 tasklet_hi_schedule(&execlists->tasklet); 2151 return; 2152 } 2153 2154 ENGINE_TRACE(engine, 2155 "preempting last=%llx:%lld, prio=%d, hint=%d\n", 2156 last->fence.context, 2157 last->fence.seqno, 2158 last->sched.attr.priority, 2159 execlists->queue_priority_hint); 2160 record_preemption(execlists); 2161 2162 /* 2163 * Don't let the RING_HEAD advance past the breadcrumb 2164 * as we unwind (and until we resubmit) so that we do 2165 * not accidentally tell it to go backwards. 
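* The pause is released again either once the promotion is acked in process_csb() or, if we end up not submitting anything, via ring_set_paused(engine, 0) in the skip_submit path below.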
2166 */ 2167 ring_set_paused(engine, 1); 2168 2169 /* 2170 * Note that we have not stopped the GPU at this point, 2171 * so we are unwinding the incomplete requests as they 2172 * remain inflight and so by the time we do complete 2173 * the preemption, some of the unwound requests may 2174 * complete! 2175 */ 2176 __unwind_incomplete_requests(engine); 2177 2178 last = NULL; 2179 } else if (need_timeslice(engine, last, rb) && 2180 timeslice_expired(execlists, last)) { 2181 if (i915_request_completed(last)) { 2182 tasklet_hi_schedule(&execlists->tasklet); 2183 return; 2184 } 2185 2186 ENGINE_TRACE(engine, 2187 "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", 2188 last->fence.context, 2189 last->fence.seqno, 2190 last->sched.attr.priority, 2191 execlists->queue_priority_hint, 2192 yesno(timeslice_yield(execlists, last))); 2193 2194 ring_set_paused(engine, 1); 2195 defer_active(engine); 2196 2197 /* 2198 * Unlike for preemption, if we rewind and continue 2199 * executing the same context as previously active, 2200 * the order of execution will remain the same and 2201 * the tail will only advance. We do not need to 2202 * force a full context restore, as a lite-restore 2203 * is sufficient to resample the monotonic TAIL. 2204 * 2205 * If we switch to any other context, similarly we 2206 * will not rewind TAIL of current context, and 2207 * normal save/restore will preserve state and allow 2208 * us to later continue executing the same request. 2209 */ 2210 last = NULL; 2211 } else { 2212 /* 2213 * Otherwise if we already have a request pending 2214 * for execution after the current one, we can 2215 * just wait until the next CS event before 2216 * queuing more. In either case we will force a 2217 * lite-restore preemption event, but if we wait 2218 * we hopefully coalesce several updates into a single 2219 * submission. 2220 */ 2221 if (!list_is_last(&last->sched.link, 2222 &engine->active.requests)) { 2223 /* 2224 * Even if ELSP[1] is occupied and not worthy 2225 * of timeslices, our queue might be. 2226 */ 2227 start_timeslice(engine, queue_prio(execlists)); 2228 return; 2229 } 2230 } 2231 } 2232 2233 while (rb) { /* XXX virtual is always taking precedence */ 2234 struct virtual_engine *ve = 2235 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 2236 struct i915_request *rq; 2237 2238 spin_lock(&ve->base.active.lock); 2239 2240 rq = ve->request; 2241 if (unlikely(!rq)) { /* lost the race to a sibling */ 2242 spin_unlock(&ve->base.active.lock); 2243 rb_erase_cached(rb, &execlists->virtual); 2244 RB_CLEAR_NODE(rb); 2245 rb = rb_first_cached(&execlists->virtual); 2246 continue; 2247 } 2248 2249 GEM_BUG_ON(rq != ve->request); 2250 GEM_BUG_ON(rq->engine != &ve->base); 2251 GEM_BUG_ON(rq->context != &ve->context); 2252 2253 if (rq_prio(rq) >= queue_prio(execlists)) { 2254 if (!virtual_matches(ve, rq, engine)) { 2255 spin_unlock(&ve->base.active.lock); 2256 rb = rb_next(rb); 2257 continue; 2258 } 2259 2260 if (last && !can_merge_rq(last, rq)) { 2261 spin_unlock(&ve->base.active.lock); 2262 start_timeslice(engine, rq_prio(rq)); 2263 return; /* leave this for another sibling */ 2264 } 2265 2266 ENGINE_TRACE(engine, 2267 "virtual rq=%llx:%lld%s, new engine? %s\n", 2268 rq->fence.context, 2269 rq->fence.seqno, 2270 i915_request_completed(rq) ? "!" : 2271 i915_request_started(rq) ? 
"*" : 2272 "", 2273 yesno(engine != ve->siblings[0])); 2274 2275 WRITE_ONCE(ve->request, NULL); 2276 WRITE_ONCE(ve->base.execlists.queue_priority_hint, 2277 INT_MIN); 2278 rb_erase_cached(rb, &execlists->virtual); 2279 RB_CLEAR_NODE(rb); 2280 2281 GEM_BUG_ON(!(rq->execution_mask & engine->mask)); 2282 WRITE_ONCE(rq->engine, engine); 2283 2284 if (__i915_request_submit(rq)) { 2285 /* 2286 * Only after we confirm that we will submit 2287 * this request (i.e. it has not already 2288 * completed), do we want to update the context. 2289 * 2290 * This serves two purposes. It avoids 2291 * unnecessary work if we are resubmitting an 2292 * already completed request after timeslicing. 2293 * But more importantly, it prevents us altering 2294 * ve->siblings[] on an idle context, where 2295 * we may be using ve->siblings[] in 2296 * virtual_context_enter / virtual_context_exit. 2297 */ 2298 virtual_xfer_context(ve, engine); 2299 GEM_BUG_ON(ve->siblings[0] != engine); 2300 2301 submit = true; 2302 last = rq; 2303 } 2304 i915_request_put(rq); 2305 2306 /* 2307 * Hmm, we have a bunch of virtual engine requests, 2308 * but the first one was already completed (thanks 2309 * preempt-to-busy!). Keep looking at the veng queue 2310 * until we have no more relevant requests (i.e. 2311 * the normal submit queue has higher priority). 2312 */ 2313 if (!submit) { 2314 spin_unlock(&ve->base.active.lock); 2315 rb = rb_first_cached(&execlists->virtual); 2316 continue; 2317 } 2318 } 2319 2320 spin_unlock(&ve->base.active.lock); 2321 break; 2322 } 2323 2324 while ((rb = rb_first_cached(&execlists->queue))) { 2325 struct i915_priolist *p = to_priolist(rb); 2326 struct i915_request *rq, *rn; 2327 int i; 2328 2329 priolist_for_each_request_consume(rq, rn, p, i) { 2330 bool merge = true; 2331 2332 /* 2333 * Can we combine this request with the current port? 2334 * It has to be the same context/ringbuffer and not 2335 * have any exceptions (e.g. GVT saying never to 2336 * combine contexts). 2337 * 2338 * If we can combine the requests, we can execute both 2339 * by updating the RING_TAIL to point to the end of the 2340 * second request, and so we never need to tell the 2341 * hardware about the first. 2342 */ 2343 if (last && !can_merge_rq(last, rq)) { 2344 /* 2345 * If we are on the second port and cannot 2346 * combine this request with the last, then we 2347 * are done. 2348 */ 2349 if (port == last_port) 2350 goto done; 2351 2352 /* 2353 * We must not populate both ELSP[] with the 2354 * same LRCA, i.e. we must submit 2 different 2355 * contexts if we submit 2 ELSP. 2356 */ 2357 if (last->context == rq->context) 2358 goto done; 2359 2360 if (i915_request_has_sentinel(last)) 2361 goto done; 2362 2363 /* 2364 * If GVT overrides us we only ever submit 2365 * port[0], leaving port[1] empty. Note that we 2366 * also have to be careful that we don't queue 2367 * the same context (even though a different 2368 * request) to the second port. 
2369 */ 2370 if (ctx_single_port_submission(last->context) || 2371 ctx_single_port_submission(rq->context)) 2372 goto done; 2373 2374 merge = false; 2375 } 2376 2377 if (__i915_request_submit(rq)) { 2378 if (!merge) { 2379 *port = execlists_schedule_in(last, port - execlists->pending); 2380 port++; 2381 last = NULL; 2382 } 2383 2384 GEM_BUG_ON(last && 2385 !can_merge_ctx(last->context, 2386 rq->context)); 2387 GEM_BUG_ON(last && 2388 i915_seqno_passed(last->fence.seqno, 2389 rq->fence.seqno)); 2390 2391 submit = true; 2392 last = rq; 2393 } 2394 } 2395 2396 rb_erase_cached(&p->node, &execlists->queue); 2397 i915_priolist_free(p); 2398 } 2399 2400 done: 2401 /* 2402 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. 2403 * 2404 * We choose the priority hint such that if we add a request of greater 2405 * priority than this, we kick the submission tasklet to decide on 2406 * the right order of submitting the requests to hardware. We must 2407 * also be prepared to reorder requests as they are in-flight on the 2408 * HW. We derive the priority hint then as the first "hole" in 2409 * the HW submission ports and if there are no available slots, 2410 * the priority of the lowest executing request, i.e. last. 2411 * 2412 * When we do receive a higher priority request ready to run from the 2413 * user, see queue_request(), the priority hint is bumped to that 2414 * request triggering preemption on the next dequeue (or subsequent 2415 * interrupt for secondary ports). 2416 */ 2417 execlists->queue_priority_hint = queue_prio(execlists); 2418 2419 if (submit) { 2420 *port = execlists_schedule_in(last, port - execlists->pending); 2421 execlists->switch_priority_hint = 2422 switch_prio(engine, *execlists->pending); 2423 2424 /* 2425 * Skip if we ended up with exactly the same set of requests, 2426 * e.g. 
trying to timeslice a pair of ordered contexts 2427 */ 2428 if (!memcmp(active, execlists->pending, 2429 (port - execlists->pending + 1) * sizeof(*port))) { 2430 do 2431 execlists_schedule_out(fetch_and_zero(port)); 2432 while (port-- != execlists->pending); 2433 2434 goto skip_submit; 2435 } 2436 clear_ports(port + 1, last_port - port); 2437 2438 WRITE_ONCE(execlists->yield, -1); 2439 set_preempt_timeout(engine, *active); 2440 execlists_submit_ports(engine); 2441 } else { 2442 start_timeslice(engine, execlists->queue_priority_hint); 2443 skip_submit: 2444 ring_set_paused(engine, 0); 2445 } 2446 } 2447 2448 static void 2449 cancel_port_requests(struct intel_engine_execlists * const execlists) 2450 { 2451 struct i915_request * const *port; 2452 2453 for (port = execlists->pending; *port; port++) 2454 execlists_schedule_out(*port); 2455 clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); 2456 2457 /* Mark the end of active before we overwrite *active */ 2458 for (port = xchg(&execlists->active, execlists->pending); *port; port++) 2459 execlists_schedule_out(*port); 2460 clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); 2461 2462 smp_wmb(); /* complete the seqlock for execlists_active() */ 2463 WRITE_ONCE(execlists->active, execlists->inflight); 2464 } 2465 2466 static inline void 2467 invalidate_csb_entries(const u32 *first, const u32 *last) 2468 { 2469 clflush((void *)first); 2470 clflush((void *)last); 2471 } 2472 2473 /* 2474 * Starting with Gen12, the status has a new format: 2475 * 2476 * bit 0: switched to new queue 2477 * bit 1: reserved 2478 * bit 2: semaphore wait mode (poll or signal), only valid when 2479 * switch detail is set to "wait on semaphore" 2480 * bits 3-5: engine class 2481 * bits 6-11: engine instance 2482 * bits 12-14: reserved 2483 * bits 15-25: sw context id of the lrc the GT switched to 2484 * bits 26-31: sw counter of the lrc the GT switched to 2485 * bits 32-35: context switch detail 2486 * - 0: ctx complete 2487 * - 1: wait on sync flip 2488 * - 2: wait on vblank 2489 * - 3: wait on scanline 2490 * - 4: wait on semaphore 2491 * - 5: context preempted (not on SEMAPHORE_WAIT or 2492 * WAIT_FOR_EVENT) 2493 * bit 36: reserved 2494 * bits 37-43: wait detail (for switch detail 1 to 4) 2495 * bits 44-46: reserved 2496 * bits 47-57: sw context id of the lrc the GT switched away from 2497 * bits 58-63: sw counter of the lrc the GT switched away from 2498 */ 2499 static inline bool 2500 gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) 2501 { 2502 u32 lower_dw = csb[0]; 2503 u32 upper_dw = csb[1]; 2504 bool ctx_to_valid = GEN12_CSB_CTX_VALID(lower_dw); 2505 bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw); 2506 bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; 2507 2508 /* 2509 * The context switch detail is not guaranteed to be 5 when a preemption 2510 * occurs, so we can't just check for that. The check below works for 2511 * all the cases we care about, including preemptions of WAIT 2512 * instructions and lite-restore. Preempt-to-idle via the CTRL register 2513 * would require some extra handling, but we don't support that. 2514 */ 2515 if (!ctx_away_valid || new_queue) { 2516 GEM_BUG_ON(!ctx_to_valid); 2517 return true; 2518 } 2519 2520 /* 2521 * switch detail = 5 is covered by the case above and we do not expect a 2522 * context switch on an unsuccessful wait instruction since we always 2523 * use polling mode. 
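* To summarise: the event is treated as a promotion (the pending ELSP is now running) whenever the outgoing context is invalid or the switched-to-new-queue bit is set; otherwise it reports the completion of the head of the inflight array.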
2524 */ 2525 GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw)); 2526 return false; 2527 } 2528 2529 static inline bool 2530 gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) 2531 { 2532 return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); 2533 } 2534 2535 static void process_csb(struct intel_engine_cs *engine) 2536 { 2537 struct intel_engine_execlists * const execlists = &engine->execlists; 2538 const u32 * const buf = execlists->csb_status; 2539 const u8 num_entries = execlists->csb_size; 2540 u8 head, tail; 2541 2542 /* 2543 * As we modify our execlists state tracking we require exclusive 2544 * access. Either we are inside the tasklet, or the tasklet is disabled 2545 * and we assume that is only inside the reset paths and so serialised. 2546 */ 2547 GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && 2548 !reset_in_progress(execlists)); 2549 GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); 2550 2551 /* 2552 * Note that csb_write, csb_status may be either in HWSP or mmio. 2553 * When reading from the csb_write mmio register, we have to be 2554 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is 2555 * the low 4bits. As it happens we know the next 4bits are always 2556 * zero and so we can simply mask off the low u8 of the register 2557 * and treat it identically to reading from the HWSP (without having 2558 * to use explicit shifting and masking, and probably bifurcating 2559 * the code to handle the legacy mmio read). 2560 */ 2561 head = execlists->csb_head; 2562 tail = READ_ONCE(*execlists->csb_write); 2563 if (unlikely(head == tail)) 2564 return; 2565 2566 /* 2567 * We will consume all events from HW, or at least pretend to. 2568 * 2569 * The sequence of events from the HW is deterministic, and derived 2570 * from our writes to the ELSP, with a smidgen of variability for 2571 * the arrival of the asynchronous requests wrt the inflight 2572 * execution. If the HW sends an event that does not correspond with 2573 * the one we are expecting, we have to abandon all hope as we lose 2574 * all tracking of what the engine is actually executing. We will 2575 * only detect we are out of sequence with the HW when we get an 2576 * 'impossible' event because we have already drained our own 2577 * preemption/promotion queue. If this occurs, we know that we likely 2578 * lost track of execution earlier and must unwind and restart; the 2579 * simplest way is to stop processing the event queue and force the 2580 * engine to reset. 2581 */ 2582 execlists->csb_head = tail; 2583 ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); 2584 2585 /* 2586 * Hopefully paired with a wmb() in HW! 2587 * 2588 * We must complete the read of the write pointer before any reads 2589 * from the CSB, so that we do not see stale values. Without an rmb 2590 * (lfence) the HW may speculatively perform the CSB[] reads *before* 2591 * we perform the READ_ONCE(*csb_write). 2592 */ 2593 rmb(); 2594 do { 2595 bool promote; 2596 2597 if (++head == num_entries) 2598 head = 0; 2599 2600 /* 2601 * We are flying near dragons again. 2602 * 2603 * We hold a reference to the request in execlist_port[] 2604 * but no more than that. We are operating in softirq 2605 * context and so cannot hold any mutex or sleep. That 2606 * means we cannot prevent the requests we are processing 2607 * in port[] from being retired simultaneously (the 2608 * breadcrumb will be complete before we see the 2609 * context-switch).
As we only hold the reference to the 2610 * request, any pointer chasing underneath the request 2611 * is subject to a potential use-after-free. Thus we 2612 * store all of the bookkeeping within port[] as 2613 * required, and avoid using unguarded pointers beneath 2614 * request itself. The same applies to the atomic 2615 * status notifier. 2616 */ 2617 2618 ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", 2619 head, buf[2 * head + 0], buf[2 * head + 1]); 2620 2621 if (INTEL_GEN(engine->i915) >= 12) 2622 promote = gen12_csb_parse(execlists, buf + 2 * head); 2623 else 2624 promote = gen8_csb_parse(execlists, buf + 2 * head); 2625 if (promote) { 2626 struct i915_request * const *old = execlists->active; 2627 2628 if (GEM_WARN_ON(!*execlists->pending)) { 2629 execlists->error_interrupt |= ERROR_CSB; 2630 break; 2631 } 2632 2633 ring_set_paused(engine, 0); 2634 2635 /* Point active to the new ELSP; prevent overwriting */ 2636 WRITE_ONCE(execlists->active, execlists->pending); 2637 smp_wmb(); /* notify execlists_active() */ 2638 2639 /* cancel old inflight, prepare for switch */ 2640 trace_ports(execlists, "preempted", old); 2641 while (*old) 2642 execlists_schedule_out(*old++); 2643 2644 /* switch pending to inflight */ 2645 GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); 2646 copy_ports(execlists->inflight, 2647 execlists->pending, 2648 execlists_num_ports(execlists)); 2649 smp_wmb(); /* complete the seqlock */ 2650 WRITE_ONCE(execlists->active, execlists->inflight); 2651 2652 WRITE_ONCE(execlists->pending[0], NULL); 2653 } else { 2654 if (GEM_WARN_ON(!*execlists->active)) { 2655 execlists->error_interrupt |= ERROR_CSB; 2656 break; 2657 } 2658 2659 /* port0 completed, advanced to port1 */ 2660 trace_ports(execlists, "completed", execlists->active); 2661 2662 /* 2663 * We rely on the hardware being strongly 2664 * ordered, that the breadcrumb write is 2665 * coherent (visible from the CPU) before the 2666 * user interrupt is processed. One might assume 2667 * that the breadcrumb write being before the 2668 * user interrupt and the CS event for the context 2669 * switch would therefore be before the CS event 2670 * itself... 
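* but that ordering has not always held in practice, hence the debug dump below whenever a context-complete event arrives for a request whose breadcrumb we cannot yet see.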
2671 */ 2672 if (GEM_SHOW_DEBUG() && 2673 !i915_request_completed(*execlists->active)) { 2674 struct i915_request *rq = *execlists->active; 2675 const u32 *regs __maybe_unused = 2676 rq->context->lrc_reg_state; 2677 2678 ENGINE_TRACE(engine, 2679 "context completed before request!\n"); 2680 ENGINE_TRACE(engine, 2681 "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", 2682 ENGINE_READ(engine, RING_START), 2683 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR, 2684 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR, 2685 ENGINE_READ(engine, RING_CTL), 2686 ENGINE_READ(engine, RING_MI_MODE)); 2687 ENGINE_TRACE(engine, 2688 "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ", 2689 i915_ggtt_offset(rq->ring->vma), 2690 rq->head, rq->tail, 2691 rq->fence.context, 2692 lower_32_bits(rq->fence.seqno), 2693 hwsp_seqno(rq)); 2694 ENGINE_TRACE(engine, 2695 "ctx:{start:%08x, head:%04x, tail:%04x}, ", 2696 regs[CTX_RING_START], 2697 regs[CTX_RING_HEAD], 2698 regs[CTX_RING_TAIL]); 2699 } 2700 2701 execlists_schedule_out(*execlists->active++); 2702 2703 GEM_BUG_ON(execlists->active - execlists->inflight > 2704 execlists_num_ports(execlists)); 2705 } 2706 } while (head != tail); 2707 2708 set_timeslice(engine); 2709 2710 /* 2711 * Gen11 has proven to fail wrt global observation point between 2712 * entry and tail update, failing on the ordering and thus 2713 * we see an old entry in the context status buffer. 2714 * 2715 * Forcibly evict out entries for the next gpu csb update, 2716 * to increase the odds that we get a fresh entries with non 2717 * working hardware. The cost for doing so comes out mostly with 2718 * the wash as hardware, working or not, will need to do the 2719 * invalidation before. 2720 */ 2721 invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); 2722 } 2723 2724 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) 2725 { 2726 lockdep_assert_held(&engine->active.lock); 2727 if (!READ_ONCE(engine->execlists.pending[0])) { 2728 rcu_read_lock(); /* protect peeking at execlists->active */ 2729 execlists_dequeue(engine); 2730 rcu_read_unlock(); 2731 } 2732 } 2733 2734 static void __execlists_hold(struct i915_request *rq) 2735 { 2736 LIST_HEAD(list); 2737 2738 do { 2739 struct i915_dependency *p; 2740 2741 if (i915_request_is_active(rq)) 2742 __i915_request_unsubmit(rq); 2743 2744 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 2745 list_move_tail(&rq->sched.link, &rq->engine->active.hold); 2746 i915_request_set_hold(rq); 2747 RQ_TRACE(rq, "on hold\n"); 2748 2749 for_each_waiter(p, rq) { 2750 struct i915_request *w = 2751 container_of(p->waiter, typeof(*w), sched); 2752 2753 /* Leave semaphores spinning on the other engines */ 2754 if (w->engine != rq->engine) 2755 continue; 2756 2757 if (!i915_request_is_ready(w)) 2758 continue; 2759 2760 if (i915_request_completed(w)) 2761 continue; 2762 2763 if (i915_request_on_hold(w)) 2764 continue; 2765 2766 list_move_tail(&w->sched.link, &list); 2767 } 2768 2769 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); 2770 } while (rq); 2771 } 2772 2773 static bool execlists_hold(struct intel_engine_cs *engine, 2774 struct i915_request *rq) 2775 { 2776 spin_lock_irq(&engine->active.lock); 2777 2778 if (i915_request_completed(rq)) { /* too late! 
*/ 2779 rq = NULL; 2780 goto unlock; 2781 } 2782 2783 if (rq->engine != engine) { /* preempted virtual engine */ 2784 struct virtual_engine *ve = to_virtual_engine(rq->engine); 2785 2786 /* 2787 * intel_context_inflight() is only protected by virtue 2788 * of process_csb() being called only by the tasklet (or 2789 * directly from inside reset while the tasklet is suspended). 2790 * Assert that neither of those is allowed to run while we 2791 * poke at the request queues. 2792 */ 2793 GEM_BUG_ON(!reset_in_progress(&engine->execlists)); 2794 2795 /* 2796 * An unsubmitted request along a virtual engine will 2797 * remain on the active (this) engine until we are able 2798 * to process the context switch away (and so mark the 2799 * context as no longer in flight). That cannot have happened 2800 * yet, otherwise we would not be hanging! 2801 */ 2802 spin_lock(&ve->base.active.lock); 2803 GEM_BUG_ON(intel_context_inflight(rq->context) != engine); 2804 GEM_BUG_ON(ve->request != rq); 2805 ve->request = NULL; 2806 spin_unlock(&ve->base.active.lock); 2807 i915_request_put(rq); 2808 2809 rq->engine = engine; 2810 } 2811 2812 /* 2813 * Transfer this request onto the hold queue to prevent it 2814 * from being resubmitted to HW (and potentially completed) before we have 2815 * released it. Since we may have already submitted following 2816 * requests, we need to remove those as well. 2817 */ 2818 GEM_BUG_ON(i915_request_on_hold(rq)); 2819 GEM_BUG_ON(rq->engine != engine); 2820 __execlists_hold(rq); 2821 GEM_BUG_ON(list_empty(&engine->active.hold)); 2822 2823 unlock: 2824 spin_unlock_irq(&engine->active.lock); 2825 return rq; 2826 } 2827 2828 static bool hold_request(const struct i915_request *rq) 2829 { 2830 struct i915_dependency *p; 2831 bool result = false; 2832 2833 /* 2834 * If one of our ancestors is on hold, we must also be on hold, 2835 * otherwise we will bypass it and execute before it.
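* Only signalers on our own engine are considered below; a dependency that runs on another engine is left to that engine's own hold list.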
2836 */ 2837 rcu_read_lock(); 2838 for_each_signaler(p, rq) { 2839 const struct i915_request *s = 2840 container_of(p->signaler, typeof(*s), sched); 2841 2842 if (s->engine != rq->engine) 2843 continue; 2844 2845 result = i915_request_on_hold(s); 2846 if (result) 2847 break; 2848 } 2849 rcu_read_unlock(); 2850 2851 return result; 2852 } 2853 2854 static void __execlists_unhold(struct i915_request *rq) 2855 { 2856 LIST_HEAD(list); 2857 2858 do { 2859 struct i915_dependency *p; 2860 2861 RQ_TRACE(rq, "hold release\n"); 2862 2863 GEM_BUG_ON(!i915_request_on_hold(rq)); 2864 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); 2865 2866 i915_request_clear_hold(rq); 2867 list_move_tail(&rq->sched.link, 2868 i915_sched_lookup_priolist(rq->engine, 2869 rq_prio(rq))); 2870 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 2871 2872 /* Also release any children on this engine that are ready */ 2873 for_each_waiter(p, rq) { 2874 struct i915_request *w = 2875 container_of(p->waiter, typeof(*w), sched); 2876 2877 /* Propagate any change in error status */ 2878 if (rq->fence.error) 2879 i915_request_set_error_once(w, rq->fence.error); 2880 2881 if (w->engine != rq->engine) 2882 continue; 2883 2884 if (!i915_request_on_hold(w)) 2885 continue; 2886 2887 /* Check that no other parents are also on hold */ 2888 if (hold_request(w)) 2889 continue; 2890 2891 list_move_tail(&w->sched.link, &list); 2892 } 2893 2894 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); 2895 } while (rq); 2896 } 2897 2898 static void execlists_unhold(struct intel_engine_cs *engine, 2899 struct i915_request *rq) 2900 { 2901 spin_lock_irq(&engine->active.lock); 2902 2903 /* 2904 * Move this request back to the priority queue, and all of its 2905 * children and grandchildren that were suspended along with it. 2906 */ 2907 __execlists_unhold(rq); 2908 2909 if (rq_prio(rq) > engine->execlists.queue_priority_hint) { 2910 engine->execlists.queue_priority_hint = rq_prio(rq); 2911 tasklet_hi_schedule(&engine->execlists.tasklet); 2912 } 2913 2914 spin_unlock_irq(&engine->active.lock); 2915 } 2916 2917 struct execlists_capture { 2918 struct work_struct work; 2919 struct i915_request *rq; 2920 struct i915_gpu_coredump *error; 2921 }; 2922 2923 static void execlists_capture_work(struct work_struct *work) 2924 { 2925 struct execlists_capture *cap = container_of(work, typeof(*cap), work); 2926 const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 2927 struct intel_engine_cs *engine = cap->rq->engine; 2928 struct intel_gt_coredump *gt = cap->error->gt; 2929 struct intel_engine_capture_vma *vma; 2930 2931 /* Compress all the objects attached to the request, slow! 
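* This runs from the workqueue with GFP_KERNEL (unlike capture_regs(), which had to use GFP_ATOMIC from the softirq), so it is allowed to sleep while compressing.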
*/ 2932 vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp); 2933 if (vma) { 2934 struct i915_vma_compress *compress = 2935 i915_vma_capture_prepare(gt); 2936 2937 intel_engine_coredump_add_vma(gt->engine, vma, compress); 2938 i915_vma_capture_finish(gt, compress); 2939 } 2940 2941 gt->simulated = gt->engine->simulated; 2942 cap->error->simulated = gt->simulated; 2943 2944 /* Publish the error state, and announce it to the world */ 2945 i915_error_state_store(cap->error); 2946 i915_gpu_coredump_put(cap->error); 2947 2948 /* Return this request and all that depend upon it for signaling */ 2949 execlists_unhold(engine, cap->rq); 2950 i915_request_put(cap->rq); 2951 2952 kfree(cap); 2953 } 2954 2955 static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) 2956 { 2957 const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; 2958 struct execlists_capture *cap; 2959 2960 cap = kmalloc(sizeof(*cap), gfp); 2961 if (!cap) 2962 return NULL; 2963 2964 cap->error = i915_gpu_coredump_alloc(engine->i915, gfp); 2965 if (!cap->error) 2966 goto err_cap; 2967 2968 cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp); 2969 if (!cap->error->gt) 2970 goto err_gpu; 2971 2972 cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp); 2973 if (!cap->error->gt->engine) 2974 goto err_gt; 2975 2976 return cap; 2977 2978 err_gt: 2979 kfree(cap->error->gt); 2980 err_gpu: 2981 kfree(cap->error); 2982 err_cap: 2983 kfree(cap); 2984 return NULL; 2985 } 2986 2987 static struct i915_request * 2988 active_context(struct intel_engine_cs *engine, u32 ccid) 2989 { 2990 const struct intel_engine_execlists * const el = &engine->execlists; 2991 struct i915_request * const *port, *rq; 2992 2993 /* 2994 * Use the most recent result from process_csb(), but just in case 2995 * we trigger an error (via interrupt) before the first CS event has 2996 * been written, peek at the next submission. 2997 */ 2998 2999 for (port = el->active; (rq = *port); port++) { 3000 if (rq->context->lrc.ccid == ccid) { 3001 ENGINE_TRACE(engine, 3002 "ccid found at active:%zd\n", 3003 port - el->active); 3004 return rq; 3005 } 3006 } 3007 3008 for (port = el->pending; (rq = *port); port++) { 3009 if (rq->context->lrc.ccid == ccid) { 3010 ENGINE_TRACE(engine, 3011 "ccid found at pending:%zd\n", 3012 port - el->pending); 3013 return rq; 3014 } 3015 } 3016 3017 ENGINE_TRACE(engine, "ccid:%x not found\n", ccid); 3018 return NULL; 3019 } 3020 3021 static u32 active_ccid(struct intel_engine_cs *engine) 3022 { 3023 return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); 3024 } 3025 3026 static void execlists_capture(struct intel_engine_cs *engine) 3027 { 3028 struct execlists_capture *cap; 3029 3030 if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) 3031 return; 3032 3033 /* 3034 * We need to _quickly_ capture the engine state before we reset. 3035 * We are inside an atomic section (softirq) here and we are delaying 3036 * the forced preemption event. 3037 */ 3038 cap = capture_regs(engine); 3039 if (!cap) 3040 return; 3041 3042 spin_lock_irq(&engine->active.lock); 3043 cap->rq = active_context(engine, active_ccid(engine)); 3044 if (cap->rq) { 3045 cap->rq = active_request(cap->rq->context->timeline, cap->rq); 3046 cap->rq = i915_request_get_rcu(cap->rq); 3047 } 3048 spin_unlock_irq(&engine->active.lock); 3049 if (!cap->rq) 3050 goto err_free; 3051 3052 /* 3053 * Remove the request from the execlists queue, and take ownership 3054 * of the request. 
We pass it to our worker who will _slowly_ compress 3055 * all the pages the _user_ requested for debugging their batch, after 3056 * which we return it to the queue for signaling. 3057 * 3058 * By removing them from the execlists queue, we also remove the 3059 * requests from being processed by __unwind_incomplete_requests() 3060 * during the intel_engine_reset(), and so they will *not* be replayed 3061 * afterwards. 3062 * 3063 * Note that because we have not yet reset the engine at this point, 3064 * it is possible for the request that we have identified as being 3065 * guilty, did in fact complete and we will then hit an arbitration 3066 * point allowing the outstanding preemption to succeed. The likelihood 3067 * of that is very low (as capturing of the engine registers should be 3068 * fast enough to run inside an irq-off atomic section!), so we will 3069 * simply hold that request accountable for being non-preemptible 3070 * long enough to force the reset. 3071 */ 3072 if (!execlists_hold(engine, cap->rq)) 3073 goto err_rq; 3074 3075 INIT_WORK(&cap->work, execlists_capture_work); 3076 schedule_work(&cap->work); 3077 return; 3078 3079 err_rq: 3080 i915_request_put(cap->rq); 3081 err_free: 3082 i915_gpu_coredump_put(cap->error); 3083 kfree(cap); 3084 } 3085 3086 static void execlists_reset(struct intel_engine_cs *engine, const char *msg) 3087 { 3088 const unsigned int bit = I915_RESET_ENGINE + engine->id; 3089 unsigned long *lock = &engine->gt->reset.flags; 3090 3091 if (!intel_has_reset_engine(engine->gt)) 3092 return; 3093 3094 if (test_and_set_bit(bit, lock)) 3095 return; 3096 3097 ENGINE_TRACE(engine, "reset for %s\n", msg); 3098 3099 /* Mark this tasklet as disabled to avoid waiting for it to complete */ 3100 tasklet_disable_nosync(&engine->execlists.tasklet); 3101 3102 ring_set_paused(engine, 1); /* Freeze the current request in place */ 3103 execlists_capture(engine); 3104 intel_engine_reset(engine, msg); 3105 3106 tasklet_enable(&engine->execlists.tasklet); 3107 clear_and_wake_up_bit(bit, lock); 3108 } 3109 3110 static bool preempt_timeout(const struct intel_engine_cs *const engine) 3111 { 3112 const struct timer_list *t = &engine->execlists.preempt; 3113 3114 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) 3115 return false; 3116 3117 if (!timer_expired(t)) 3118 return false; 3119 3120 return READ_ONCE(engine->execlists.pending[0]); 3121 } 3122 3123 /* 3124 * Check the unread Context Status Buffers and manage the submission of new 3125 * contexts to the ELSP accordingly. 3126 */ 3127 static void execlists_submission_tasklet(unsigned long data) 3128 { 3129 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 3130 bool timeout = preempt_timeout(engine); 3131 3132 process_csb(engine); 3133 3134 if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { 3135 const char *msg; 3136 3137 /* Generate the error message in priority wrt to the user! 
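* i.e. a CS error raised by the user's own payload is reported in preference to our internal CSB tracking error.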
*/ 3138 if (engine->execlists.error_interrupt & GENMASK(15, 0)) 3139 msg = "CS error"; /* thrown by a user payload */ 3140 else if (engine->execlists.error_interrupt & ERROR_CSB) 3141 msg = "invalid CSB event"; 3142 else 3143 msg = "internal error"; 3144 3145 engine->execlists.error_interrupt = 0; 3146 execlists_reset(engine, msg); 3147 } 3148 3149 if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { 3150 unsigned long flags; 3151 3152 spin_lock_irqsave(&engine->active.lock, flags); 3153 __execlists_submission_tasklet(engine); 3154 spin_unlock_irqrestore(&engine->active.lock, flags); 3155 3156 /* Recheck after serialising with direct-submission */ 3157 if (unlikely(timeout && preempt_timeout(engine))) 3158 execlists_reset(engine, "preemption time out"); 3159 } 3160 } 3161 3162 static void __execlists_kick(struct intel_engine_execlists *execlists) 3163 { 3164 /* Kick the tasklet for some interrupt coalescing and reset handling */ 3165 tasklet_hi_schedule(&execlists->tasklet); 3166 } 3167 3168 #define execlists_kick(t, member) \ 3169 __execlists_kick(container_of(t, struct intel_engine_execlists, member)) 3170 3171 static void execlists_timeslice(struct timer_list *timer) 3172 { 3173 execlists_kick(timer, timer); 3174 } 3175 3176 static void execlists_preempt(struct timer_list *timer) 3177 { 3178 execlists_kick(timer, preempt); 3179 } 3180 3181 static void queue_request(struct intel_engine_cs *engine, 3182 struct i915_request *rq) 3183 { 3184 GEM_BUG_ON(!list_empty(&rq->sched.link)); 3185 list_add_tail(&rq->sched.link, 3186 i915_sched_lookup_priolist(engine, rq_prio(rq))); 3187 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 3188 } 3189 3190 static void __submit_queue_imm(struct intel_engine_cs *engine) 3191 { 3192 struct intel_engine_execlists * const execlists = &engine->execlists; 3193 3194 if (reset_in_progress(execlists)) 3195 return; /* defer until we restart the engine following reset */ 3196 3197 __execlists_submission_tasklet(engine); 3198 } 3199 3200 static void submit_queue(struct intel_engine_cs *engine, 3201 const struct i915_request *rq) 3202 { 3203 struct intel_engine_execlists *execlists = &engine->execlists; 3204 3205 if (rq_prio(rq) <= execlists->queue_priority_hint) 3206 return; 3207 3208 execlists->queue_priority_hint = rq_prio(rq); 3209 __submit_queue_imm(engine); 3210 } 3211 3212 static bool ancestor_on_hold(const struct intel_engine_cs *engine, 3213 const struct i915_request *rq) 3214 { 3215 GEM_BUG_ON(i915_request_on_hold(rq)); 3216 return !list_empty(&engine->active.hold) && hold_request(rq); 3217 } 3218 3219 static void flush_csb(struct intel_engine_cs *engine) 3220 { 3221 struct intel_engine_execlists *el = &engine->execlists; 3222 3223 if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) { 3224 if (!reset_in_progress(el)) 3225 process_csb(engine); 3226 tasklet_unlock(&el->tasklet); 3227 } 3228 } 3229 3230 static void execlists_submit_request(struct i915_request *request) 3231 { 3232 struct intel_engine_cs *engine = request->engine; 3233 unsigned long flags; 3234 3235 /* Hopefully we clear execlists->pending[] to let us through */ 3236 flush_csb(engine); 3237 3238 /* Will be called from irq-context when using foreign fences. 
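* Hence the irqsave variant of engine->active.lock below.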
*/ 3239 spin_lock_irqsave(&engine->active.lock, flags); 3240 3241 if (unlikely(ancestor_on_hold(engine, request))) { 3242 RQ_TRACE(request, "ancestor on hold\n"); 3243 list_add_tail(&request->sched.link, &engine->active.hold); 3244 i915_request_set_hold(request); 3245 } else { 3246 queue_request(engine, request); 3247 3248 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 3249 GEM_BUG_ON(list_empty(&request->sched.link)); 3250 3251 submit_queue(engine, request); 3252 } 3253 3254 spin_unlock_irqrestore(&engine->active.lock, flags); 3255 } 3256 3257 static void __execlists_context_fini(struct intel_context *ce) 3258 { 3259 intel_ring_put(ce->ring); 3260 i915_vma_put(ce->state); 3261 } 3262 3263 static void execlists_context_destroy(struct kref *kref) 3264 { 3265 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 3266 3267 GEM_BUG_ON(!i915_active_is_idle(&ce->active)); 3268 GEM_BUG_ON(intel_context_is_pinned(ce)); 3269 3270 if (ce->state) 3271 __execlists_context_fini(ce); 3272 3273 intel_context_fini(ce); 3274 intel_context_free(ce); 3275 } 3276 3277 static void 3278 set_redzone(void *vaddr, const struct intel_engine_cs *engine) 3279 { 3280 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 3281 return; 3282 3283 vaddr += engine->context_size; 3284 3285 memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE); 3286 } 3287 3288 static void 3289 check_redzone(const void *vaddr, const struct intel_engine_cs *engine) 3290 { 3291 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 3292 return; 3293 3294 vaddr += engine->context_size; 3295 3296 if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) 3297 drm_err_once(&engine->i915->drm, 3298 "%s context redzone overwritten!\n", 3299 engine->name); 3300 } 3301 3302 static void execlists_context_unpin(struct intel_context *ce) 3303 { 3304 check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET, 3305 ce->engine); 3306 } 3307 3308 static void execlists_context_post_unpin(struct intel_context *ce) 3309 { 3310 i915_gem_object_unpin_map(ce->state->obj); 3311 } 3312 3313 static u32 * 3314 gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs) 3315 { 3316 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 3317 MI_SRM_LRM_GLOBAL_GTT | 3318 MI_LRI_LRM_CS_MMIO; 3319 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3320 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + 3321 CTX_TIMESTAMP * sizeof(u32); 3322 *cs++ = 0; 3323 3324 *cs++ = MI_LOAD_REGISTER_REG | 3325 MI_LRR_SOURCE_CS_MMIO | 3326 MI_LRI_LRM_CS_MMIO; 3327 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3328 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0)); 3329 3330 *cs++ = MI_LOAD_REGISTER_REG | 3331 MI_LRR_SOURCE_CS_MMIO | 3332 MI_LRI_LRM_CS_MMIO; 3333 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3334 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0)); 3335 3336 return cs; 3337 } 3338 3339 static u32 * 3340 gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs) 3341 { 3342 GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1); 3343 3344 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 3345 MI_SRM_LRM_GLOBAL_GTT | 3346 MI_LRI_LRM_CS_MMIO; 3347 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3348 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + 3349 (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32); 3350 *cs++ = 0; 3351 3352 return cs; 3353 } 3354 3355 static u32 * 3356 gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs) 3357 { 3358 GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1); 3359 3360 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 3361 
MI_SRM_LRM_GLOBAL_GTT | 3362 MI_LRI_LRM_CS_MMIO; 3363 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3364 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + 3365 (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32); 3366 *cs++ = 0; 3367 3368 *cs++ = MI_LOAD_REGISTER_REG | 3369 MI_LRR_SOURCE_CS_MMIO | 3370 MI_LRI_LRM_CS_MMIO; 3371 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3372 *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0)); 3373 3374 return cs; 3375 } 3376 3377 static u32 * 3378 gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) 3379 { 3380 cs = gen12_emit_timestamp_wa(ce, cs); 3381 cs = gen12_emit_cmd_buf_wa(ce, cs); 3382 cs = gen12_emit_restore_scratch(ce, cs); 3383 3384 return cs; 3385 } 3386 3387 static u32 * 3388 gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) 3389 { 3390 cs = gen12_emit_timestamp_wa(ce, cs); 3391 cs = gen12_emit_restore_scratch(ce, cs); 3392 3393 return cs; 3394 } 3395 3396 static inline u32 context_wa_bb_offset(const struct intel_context *ce) 3397 { 3398 return PAGE_SIZE * ce->wa_bb_page; 3399 } 3400 3401 static u32 *context_indirect_bb(const struct intel_context *ce) 3402 { 3403 void *ptr; 3404 3405 GEM_BUG_ON(!ce->wa_bb_page); 3406 3407 ptr = ce->lrc_reg_state; 3408 ptr -= LRC_STATE_OFFSET; /* back to start of context image */ 3409 ptr += context_wa_bb_offset(ce); 3410 3411 return ptr; 3412 } 3413 3414 static void 3415 setup_indirect_ctx_bb(const struct intel_context *ce, 3416 const struct intel_engine_cs *engine, 3417 u32 *(*emit)(const struct intel_context *, u32 *)) 3418 { 3419 u32 * const start = context_indirect_bb(ce); 3420 u32 *cs; 3421 3422 cs = emit(ce, start); 3423 GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs)); 3424 while ((unsigned long)cs % CACHELINE_BYTES) 3425 *cs++ = MI_NOOP; 3426 3427 lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine, 3428 i915_ggtt_offset(ce->state) + 3429 context_wa_bb_offset(ce), 3430 (cs - start) * sizeof(*cs)); 3431 } 3432 3433 static void 3434 __execlists_update_reg_state(const struct intel_context *ce, 3435 const struct intel_engine_cs *engine, 3436 u32 head) 3437 { 3438 struct intel_ring *ring = ce->ring; 3439 u32 *regs = ce->lrc_reg_state; 3440 3441 GEM_BUG_ON(!intel_ring_offset_valid(ring, head)); 3442 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); 3443 3444 regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); 3445 regs[CTX_RING_HEAD] = head; 3446 regs[CTX_RING_TAIL] = ring->tail; 3447 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; 3448 3449 /* RPCS */ 3450 if (engine->class == RENDER_CLASS) { 3451 regs[CTX_R_PWR_CLK_STATE] = 3452 intel_sseu_make_rpcs(engine->gt, &ce->sseu); 3453 3454 i915_oa_init_reg_state(ce, engine); 3455 } 3456 3457 if (ce->wa_bb_page) { 3458 u32 *(*fn)(const struct intel_context *ce, u32 *cs); 3459 3460 fn = gen12_emit_indirect_ctx_xcs; 3461 if (ce->engine->class == RENDER_CLASS) 3462 fn = gen12_emit_indirect_ctx_rcs; 3463 3464 /* Mutually exclusive wrt to global indirect bb */ 3465 GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size); 3466 setup_indirect_ctx_bb(ce, engine, fn); 3467 } 3468 } 3469 3470 static int 3471 execlists_context_pre_pin(struct intel_context *ce, 3472 struct i915_gem_ww_ctx *ww, void **vaddr) 3473 { 3474 GEM_BUG_ON(!ce->state); 3475 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 3476 3477 *vaddr = i915_gem_object_pin_map(ce->state->obj, 3478 i915_coherent_map_type(ce->engine->i915) | 3479 I915_MAP_OVERRIDE); 3480 3481 return PTR_ERR_OR_ZERO(*vaddr); 3482 } 3483 3484 static int 3485 
__execlists_context_pin(struct intel_context *ce, 3486 struct intel_engine_cs *engine, 3487 void *vaddr) 3488 { 3489 ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; 3490 ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; 3491 __execlists_update_reg_state(ce, engine, ce->ring->tail); 3492 3493 return 0; 3494 } 3495 3496 static int execlists_context_pin(struct intel_context *ce, void *vaddr) 3497 { 3498 return __execlists_context_pin(ce, ce->engine, vaddr); 3499 } 3500 3501 static int execlists_context_alloc(struct intel_context *ce) 3502 { 3503 return __execlists_context_alloc(ce, ce->engine); 3504 } 3505 3506 static void execlists_context_reset(struct intel_context *ce) 3507 { 3508 CE_TRACE(ce, "reset\n"); 3509 GEM_BUG_ON(!intel_context_is_pinned(ce)); 3510 3511 intel_ring_reset(ce->ring, ce->ring->emit); 3512 3513 /* Scrub away the garbage */ 3514 execlists_init_reg_state(ce->lrc_reg_state, 3515 ce, ce->engine, ce->ring, true); 3516 __execlists_update_reg_state(ce, ce->engine, ce->ring->tail); 3517 3518 ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; 3519 } 3520 3521 static const struct intel_context_ops execlists_context_ops = { 3522 .alloc = execlists_context_alloc, 3523 3524 .pre_pin = execlists_context_pre_pin, 3525 .pin = execlists_context_pin, 3526 .unpin = execlists_context_unpin, 3527 .post_unpin = execlists_context_post_unpin, 3528 3529 .enter = intel_context_enter_engine, 3530 .exit = intel_context_exit_engine, 3531 3532 .reset = execlists_context_reset, 3533 .destroy = execlists_context_destroy, 3534 }; 3535 3536 static int gen8_emit_init_breadcrumb(struct i915_request *rq) 3537 { 3538 u32 *cs; 3539 3540 GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq)); 3541 if (!i915_request_timeline(rq)->has_initial_breadcrumb) 3542 return 0; 3543 3544 cs = intel_ring_begin(rq, 6); 3545 if (IS_ERR(cs)) 3546 return PTR_ERR(cs); 3547 3548 /* 3549 * Check if we have been preempted before we even get started. 3550 * 3551 * After this point i915_request_started() reports true, even if 3552 * we get preempted and so are no longer running. 3553 */ 3554 *cs++ = MI_ARB_CHECK; 3555 *cs++ = MI_NOOP; 3556 3557 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 3558 *cs++ = i915_request_timeline(rq)->hwsp_offset; 3559 *cs++ = 0; 3560 *cs++ = rq->fence.seqno - 1; 3561 3562 intel_ring_advance(rq, cs); 3563 3564 /* Record the updated position of the request's payload */ 3565 rq->infix = intel_ring_offset(rq, cs); 3566 3567 __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags); 3568 3569 return 0; 3570 } 3571 3572 static int emit_pdps(struct i915_request *rq) 3573 { 3574 const struct intel_engine_cs * const engine = rq->engine; 3575 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm); 3576 int err, i; 3577 u32 *cs; 3578 3579 GEM_BUG_ON(intel_vgpu_active(rq->engine->i915)); 3580 3581 /* 3582 * Beware ye of the dragons, this sequence is magic! 3583 * 3584 * Small changes to this sequence can cause anything from 3585 * GPU hangs to forcewake errors and machine lockups! 3586 */ 3587 3588 /* Flush any residual operations from the context load */ 3589 err = engine->emit_flush(rq, EMIT_FLUSH); 3590 if (err) 3591 return err; 3592 3593 /* Magic required to prevent forcewake errors! 
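* (An extra invalidating flush between the context load and the LRIs below; per the dragons warning above, small changes here -- including dropping this flush -- risk forcewake errors and worse.)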
*/ 3594 err = engine->emit_flush(rq, EMIT_INVALIDATE); 3595 if (err) 3596 return err; 3597 3598 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 3599 if (IS_ERR(cs)) 3600 return PTR_ERR(cs); 3601 3602 /* Ensure the LRIs have landed before we invalidate & continue */ 3603 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 3604 for (i = GEN8_3LVL_PDPES; i--; ) { 3605 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 3606 u32 base = engine->mmio_base; 3607 3608 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 3609 *cs++ = upper_32_bits(pd_daddr); 3610 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 3611 *cs++ = lower_32_bits(pd_daddr); 3612 } 3613 *cs++ = MI_NOOP; 3614 3615 intel_ring_advance(rq, cs); 3616 3617 return 0; 3618 } 3619 3620 static int execlists_request_alloc(struct i915_request *request) 3621 { 3622 int ret; 3623 3624 GEM_BUG_ON(!intel_context_is_pinned(request->context)); 3625 3626 /* 3627 * Flush enough space to reduce the likelihood of waiting after 3628 * we start building the request - in which case we will just 3629 * have to repeat work. 3630 */ 3631 request->reserved_space += EXECLISTS_REQUEST_SIZE; 3632 3633 /* 3634 * Note that after this point, we have committed to using 3635 * this request as it is being used to both track the 3636 * state of engine initialisation and liveness of the 3637 * golden renderstate above. Think twice before you try 3638 * to cancel/unwind this request now. 3639 */ 3640 3641 if (!i915_vm_is_4lvl(request->context->vm)) { 3642 ret = emit_pdps(request); 3643 if (ret) 3644 return ret; 3645 } 3646 3647 /* Unconditionally invalidate GPU caches and TLBs. */ 3648 ret = request->engine->emit_flush(request, EMIT_INVALIDATE); 3649 if (ret) 3650 return ret; 3651 3652 request->reserved_space -= EXECLISTS_REQUEST_SIZE; 3653 return 0; 3654 } 3655 3656 /* 3657 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after the 3658 * PIPE_CONTROL instruction. This is required for the flush to happen correctly 3659 * but there is a slight complication as this is applied in a WA batch where the 3660 * values are only initialized once, so we cannot read the register value at the 3661 * beginning and reuse it further; hence we save its value to memory, upload a 3662 * constant value with bit 21 set and then restore the saved value. 3663 * To simplify the WA, a constant value is formed by using the default value 3664 * of this register. This shouldn't be a problem because we are only modifying 3665 * it for a short period and this batch is non-preemptible. We could of course 3666 * use additional instructions that read the actual value of the register 3667 * at that time and set our bit of interest, but that would make the WA more complicated. 3668 * 3669 * This WA is also required for Gen9, so extracting it as a function avoids 3670 * code duplication. 3671 */ 3672 static u32 * 3673 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) 3674 { 3675 /* NB no one else is allowed to scribble over scratch + 256!
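* The sequence below round-trips GEN8_L3SQCREG4 through that scratch slot: SRM the old value out, LRI the flush-enable constant, emit the PIPE_CONTROL, then LRM the old value back.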
*/ 3676 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; 3677 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); 3678 *batch++ = intel_gt_scratch_offset(engine->gt, 3679 INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); 3680 *batch++ = 0; 3681 3682 *batch++ = MI_LOAD_REGISTER_IMM(1); 3683 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); 3684 *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES; 3685 3686 batch = gen8_emit_pipe_control(batch, 3687 PIPE_CONTROL_CS_STALL | 3688 PIPE_CONTROL_DC_FLUSH_ENABLE, 3689 0); 3690 3691 *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; 3692 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); 3693 *batch++ = intel_gt_scratch_offset(engine->gt, 3694 INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); 3695 *batch++ = 0; 3696 3697 return batch; 3698 } 3699 3700 /* 3701 * Typically we only have one indirect_ctx and per_ctx batch buffer which are 3702 * initialized at the beginning and shared across all contexts, but this field 3703 * helps us to have multiple batches at different offsets and select them based 3704 * on some criteria. At the moment this batch always starts at the beginning of the page 3705 * and at this point we don't have multiple wa_ctx batch buffers. 3706 * 3707 * The number of WAs applied is not known at the beginning; we use this field 3708 * to return the number of DWORDs written. 3709 * 3710 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END 3711 * so it adds NOOPs as padding to make it cacheline aligned. 3712 * MI_BATCH_BUFFER_END will be added to the per-ctx batch and both of them together 3713 * make a complete batch buffer. 3714 */ 3715 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 3716 { 3717 /* WaDisableCtxRestoreArbitration:bdw,chv */ 3718 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 3719 3720 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 3721 if (IS_BROADWELL(engine->i915)) 3722 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 3723 3724 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 3725 /* Actual scratch location is at 128 bytes offset */ 3726 batch = gen8_emit_pipe_control(batch, 3727 PIPE_CONTROL_FLUSH_L3 | 3728 PIPE_CONTROL_STORE_DATA_INDEX | 3729 PIPE_CONTROL_CS_STALL | 3730 PIPE_CONTROL_QW_WRITE, 3731 LRC_PPHWSP_SCRATCH_ADDR); 3732 3733 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 3734 3735 /* Pad to end of cacheline */ 3736 while ((unsigned long)batch % CACHELINE_BYTES) 3737 *batch++ = MI_NOOP; 3738 3739 /* 3740 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 3741 * execution depends on the length specified in terms of cache lines 3742 * in the register CTX_RCS_INDIRECT_CTX 3743 */ 3744 3745 return batch; 3746 } 3747 3748 struct lri { 3749 i915_reg_t reg; 3750 u32 value; 3751 }; 3752 3753 static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count) 3754 { 3755 GEM_BUG_ON(!count || count > 63); 3756 3757 *batch++ = MI_LOAD_REGISTER_IMM(count); 3758 do { 3759 *batch++ = i915_mmio_reg_offset(lri->reg); 3760 *batch++ = lri->value; 3761 } while (lri++, --count); 3762 *batch++ = MI_NOOP; 3763 3764 return batch; 3765 } 3766 3767 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 3768 { 3769 static const struct lri lri[] = { 3770 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 3771 { 3772 COMMON_SLICE_CHICKEN2, 3773 __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE, 3774 0), 3775 }, 3776 3777 /* BSpec: 11391 */ 3778 { 3779 FF_SLICE_CHICKEN, 3780
__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX, 3781 FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX), 3782 }, 3783 3784 /* BSpec: 11299 */ 3785 { 3786 _3D_CHICKEN3, 3787 __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX, 3788 _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX), 3789 } 3790 }; 3791 3792 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 3793 3794 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 3795 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 3796 3797 /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ 3798 batch = gen8_emit_pipe_control(batch, 3799 PIPE_CONTROL_FLUSH_L3 | 3800 PIPE_CONTROL_STORE_DATA_INDEX | 3801 PIPE_CONTROL_CS_STALL | 3802 PIPE_CONTROL_QW_WRITE, 3803 LRC_PPHWSP_SCRATCH_ADDR); 3804 3805 batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); 3806 3807 /* WaMediaPoolStateCmdInWABB:bxt,glk */ 3808 if (HAS_POOLED_EU(engine->i915)) { 3809 /* 3810 * EU pool configuration is setup along with golden context 3811 * during context initialization. This value depends on 3812 * device type (2x6 or 3x6) and needs to be updated based 3813 * on which subslice is disabled especially for 2x6 3814 * devices, however it is safe to load default 3815 * configuration of 3x6 device instead of masking off 3816 * corresponding bits because HW ignores bits of a disabled 3817 * subslice and drops down to appropriate config. Please 3818 * see render_state_setup() in i915_gem_render_state.c for 3819 * possible configurations, to avoid duplication they are 3820 * not shown here again. 3821 */ 3822 *batch++ = GEN9_MEDIA_POOL_STATE; 3823 *batch++ = GEN9_MEDIA_POOL_ENABLE; 3824 *batch++ = 0x00777000; 3825 *batch++ = 0; 3826 *batch++ = 0; 3827 *batch++ = 0; 3828 } 3829 3830 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 3831 3832 /* Pad to end of cacheline */ 3833 while ((unsigned long)batch % CACHELINE_BYTES) 3834 *batch++ = MI_NOOP; 3835 3836 return batch; 3837 } 3838 3839 static u32 * 3840 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 3841 { 3842 int i; 3843 3844 /* 3845 * WaPipeControlBefore3DStateSamplePattern: cnl 3846 * 3847 * Ensure the engine is idle prior to programming a 3848 * 3DSTATE_SAMPLE_PATTERN during a context restore. 3849 */ 3850 batch = gen8_emit_pipe_control(batch, 3851 PIPE_CONTROL_CS_STALL, 3852 0); 3853 /* 3854 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for 3855 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in 3856 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is 3857 * confusing. Since gen8_emit_pipe_control() already advances the 3858 * batch by 6 dwords, we advance the other 10 here, completing a 3859 * cacheline. It's not clear if the workaround requires this padding 3860 * before other commands, or if it's just the regular padding we would 3861 * already have for the workaround bb, so leave it here for now. 
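* (For the arithmetic: the 6 dwords of the PIPE_CONTROL above plus the 10 MI_NOOPs below make 16 dwords, i.e. 64 bytes, exactly one cacheline.)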
3862 */ 3863 for (i = 0; i < 10; i++) 3864 *batch++ = MI_NOOP; 3865 3866 /* Pad to end of cacheline */ 3867 while ((unsigned long)batch % CACHELINE_BYTES) 3868 *batch++ = MI_NOOP; 3869 3870 return batch; 3871 } 3872 3873 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) 3874 3875 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) 3876 { 3877 struct drm_i915_gem_object *obj; 3878 struct i915_vma *vma; 3879 int err; 3880 3881 obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); 3882 if (IS_ERR(obj)) 3883 return PTR_ERR(obj); 3884 3885 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); 3886 if (IS_ERR(vma)) { 3887 err = PTR_ERR(vma); 3888 goto err; 3889 } 3890 3891 err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); 3892 if (err) 3893 goto err; 3894 3895 engine->wa_ctx.vma = vma; 3896 return 0; 3897 3898 err: 3899 i915_gem_object_put(obj); 3900 return err; 3901 } 3902 3903 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) 3904 { 3905 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); 3906 } 3907 3908 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); 3909 3910 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 3911 { 3912 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 3913 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, 3914 &wa_ctx->per_ctx }; 3915 wa_bb_func_t wa_bb_fn[2]; 3916 void *batch, *batch_ptr; 3917 unsigned int i; 3918 int ret; 3919 3920 if (engine->class != RENDER_CLASS) 3921 return 0; 3922 3923 switch (INTEL_GEN(engine->i915)) { 3924 case 12: 3925 case 11: 3926 return 0; 3927 case 10: 3928 wa_bb_fn[0] = gen10_init_indirectctx_bb; 3929 wa_bb_fn[1] = NULL; 3930 break; 3931 case 9: 3932 wa_bb_fn[0] = gen9_init_indirectctx_bb; 3933 wa_bb_fn[1] = NULL; 3934 break; 3935 case 8: 3936 wa_bb_fn[0] = gen8_init_indirectctx_bb; 3937 wa_bb_fn[1] = NULL; 3938 break; 3939 default: 3940 MISSING_CASE(INTEL_GEN(engine->i915)); 3941 return 0; 3942 } 3943 3944 ret = lrc_setup_wa_ctx(engine); 3945 if (ret) { 3946 drm_dbg(&engine->i915->drm, 3947 "Failed to setup context WA page: %d\n", ret); 3948 return ret; 3949 } 3950 3951 batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB); 3952 3953 /* 3954 * Emit the two workaround batch buffers, recording the offset from the 3955 * start of the workaround batch buffer object for each and their 3956 * respective sizes. 3957 */ 3958 batch_ptr = batch; 3959 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { 3960 wa_bb[i]->offset = batch_ptr - batch; 3961 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, 3962 CACHELINE_BYTES))) { 3963 ret = -EINVAL; 3964 break; 3965 } 3966 if (wa_bb_fn[i]) 3967 batch_ptr = wa_bb_fn[i](engine, batch_ptr); 3968 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); 3969 } 3970 GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); 3971 3972 __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch); 3973 __i915_gem_object_release_map(wa_ctx->vma->obj); 3974 if (ret) 3975 lrc_destroy_wa_ctx(engine); 3976 3977 return ret; 3978 } 3979 3980 static void reset_csb_pointers(struct intel_engine_cs *engine) 3981 { 3982 struct intel_engine_execlists * const execlists = &engine->execlists; 3983 const unsigned int reset_value = execlists->csb_size - 1; 3984 3985 ring_set_paused(engine, 0); 3986 3987 /* 3988 * Sometimes Icelake forgets to reset its pointers on a GPU reset. 3989 * Bludgeon them with a mmio update to be sure. 
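* (RING_CONTEXT_STATUS_PTR is a masked register: the 0xffff << 16 in the writes below is the write-enable mask for the low word, which holds the read and write pointers, so both are forced back to reset_value.)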
3990 */ 3991 ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, 3992 0xffff << 16 | reset_value << 8 | reset_value); 3993 ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); 3994 3995 /* 3996 * After a reset, the HW starts writing into CSB entry [0]. We 3997 * therefore have to set our HEAD pointer back one entry so that 3998 * the *first* entry we check is entry 0. To complicate this further, 3999 * as we don't wait for the first interrupt after reset, we have to 4000 * fake the HW write to point back to the last entry so that our 4001 * inline comparison of our cached head position against the last HW 4002 * write works even before the first interrupt. 4003 */ 4004 execlists->csb_head = reset_value; 4005 WRITE_ONCE(*execlists->csb_write, reset_value); 4006 wmb(); /* Make sure this is visible to HW (paranoia?) */ 4007 4008 invalidate_csb_entries(&execlists->csb_status[0], 4009 &execlists->csb_status[reset_value]); 4010 4011 /* Once more for luck and our trusty paranoia */ 4012 ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, 4013 0xffff << 16 | reset_value << 8 | reset_value); 4014 ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); 4015 4016 GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value); 4017 } 4018 4019 static void execlists_sanitize(struct intel_engine_cs *engine) 4020 { 4021 /* 4022 * Poison residual state on resume, in case the suspend didn't! 4023 * 4024 * We have to assume that across suspend/resume (or other loss 4025 * of control) that the contents of our pinned buffers has been 4026 * lost, replaced by garbage. Since this doesn't always happen, 4027 * let's poison such state so that we more quickly spot when 4028 * we falsely assume it has been preserved. 4029 */ 4030 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 4031 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); 4032 4033 reset_csb_pointers(engine); 4034 4035 /* 4036 * The kernel_context HWSP is stored in the status_page. As above, 4037 * that may be lost on resume/initialisation, and so we need to 4038 * reset the value in the HWSP. 4039 */ 4040 intel_timeline_reset_seqno(engine->kernel_context->timeline); 4041 4042 /* And scrub the dirty cachelines for the HWSP */ 4043 clflush_cache_range(engine->status_page.addr, PAGE_SIZE); 4044 } 4045 4046 static void enable_error_interrupt(struct intel_engine_cs *engine) 4047 { 4048 u32 status; 4049 4050 engine->execlists.error_interrupt = 0; 4051 ENGINE_WRITE(engine, RING_EMR, ~0u); 4052 ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */ 4053 4054 status = ENGINE_READ(engine, RING_ESR); 4055 if (unlikely(status)) { 4056 drm_err(&engine->i915->drm, 4057 "engine '%s' resumed still in error: %08x\n", 4058 engine->name, status); 4059 __intel_gt_reset(engine->gt, engine->mask); 4060 } 4061 4062 /* 4063 * On current gen8+, we have 2 signals to play with 4064 * 4065 * - I915_ERROR_INSTUCTION (bit 0) 4066 * 4067 * Generate an error if the command parser encounters an invalid 4068 * instruction 4069 * 4070 * This is a fatal error. 4071 * 4072 * - CP_PRIV (bit 2) 4073 * 4074 * Generate an error on privilege violation (where the CP replaces 4075 * the instruction with a no-op). This also fires for writes into 4076 * read-only scratch pages. 4077 * 4078 * This is a non-fatal error, parsing continues. 
4079 * 4080 * * there are a few others defined for odd HW that we do not use 4081 * 4082 * Since CP_PRIV fires for cases where we have chosen to ignore the 4083 * error (as the HW is validating and suppressing the mistakes), we 4084 * only unmask the instruction error bit. 4085 */ 4086 ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION); 4087 } 4088 4089 static void enable_execlists(struct intel_engine_cs *engine) 4090 { 4091 u32 mode; 4092 4093 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); 4094 4095 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ 4096 4097 if (INTEL_GEN(engine->i915) >= 11) 4098 mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); 4099 else 4100 mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); 4101 ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); 4102 4103 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 4104 4105 ENGINE_WRITE_FW(engine, 4106 RING_HWS_PGA, 4107 i915_ggtt_offset(engine->status_page.vma)); 4108 ENGINE_POSTING_READ(engine, RING_HWS_PGA); 4109 4110 enable_error_interrupt(engine); 4111 4112 engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); 4113 } 4114 4115 static bool unexpected_starting_state(struct intel_engine_cs *engine) 4116 { 4117 bool unexpected = false; 4118 4119 if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { 4120 drm_dbg(&engine->i915->drm, 4121 "STOP_RING still set in RING_MI_MODE\n"); 4122 unexpected = true; 4123 } 4124 4125 return unexpected; 4126 } 4127 4128 static int execlists_resume(struct intel_engine_cs *engine) 4129 { 4130 intel_mocs_init_engine(engine); 4131 4132 intel_breadcrumbs_reset(engine->breadcrumbs); 4133 4134 if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { 4135 struct drm_printer p = drm_debug_printer(__func__); 4136 4137 intel_engine_dump(engine, &p, NULL); 4138 } 4139 4140 enable_execlists(engine); 4141 4142 return 0; 4143 } 4144 4145 static void execlists_reset_prepare(struct intel_engine_cs *engine) 4146 { 4147 struct intel_engine_execlists * const execlists = &engine->execlists; 4148 unsigned long flags; 4149 4150 ENGINE_TRACE(engine, "depth<-%d\n", 4151 atomic_read(&execlists->tasklet.count)); 4152 4153 /* 4154 * Prevent request submission to the hardware until we have 4155 * completed the reset in i915_gem_reset_finish(). If a request 4156 * is completed by one engine, it may then queue a request 4157 * to a second via its execlists->tasklet *just* as we are 4158 * calling engine->resume() and also writing the ELSP. 4159 * Turning off the execlists->tasklet until the reset is over 4160 * prevents the race. 4161 */ 4162 __tasklet_disable_sync_once(&execlists->tasklet); 4163 GEM_BUG_ON(!reset_in_progress(execlists)); 4164 4165 /* And flush any current direct submission. */ 4166 spin_lock_irqsave(&engine->active.lock, flags); 4167 spin_unlock_irqrestore(&engine->active.lock, flags); 4168 4169 /* 4170 * We stop engines, otherwise we might get failed reset and a 4171 * dead gpu (on elk). Also as modern gpu as kbl can suffer 4172 * from system hang if batchbuffer is progressing when 4173 * the reset is issued, regardless of READY_TO_RESET ack. 4174 * Thus assume it is best to stop engines on all gens 4175 * where we have a gpu reset. 
4176 * 4177 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) 4178 * 4179 * FIXME: Wa for more modern gens needs to be validated 4180 */ 4181 ring_set_paused(engine, 1); 4182 intel_engine_stop_cs(engine); 4183 4184 engine->execlists.reset_ccid = active_ccid(engine); 4185 } 4186 4187 static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) 4188 { 4189 int x; 4190 4191 x = lrc_ring_mi_mode(engine); 4192 if (x != -1) { 4193 regs[x + 1] &= ~STOP_RING; 4194 regs[x + 1] |= STOP_RING << 16; 4195 } 4196 } 4197 4198 static void __execlists_reset_reg_state(const struct intel_context *ce, 4199 const struct intel_engine_cs *engine) 4200 { 4201 u32 *regs = ce->lrc_reg_state; 4202 4203 __reset_stop_ring(regs, engine); 4204 } 4205 4206 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) 4207 { 4208 struct intel_engine_execlists * const execlists = &engine->execlists; 4209 struct intel_context *ce; 4210 struct i915_request *rq; 4211 u32 head; 4212 4213 mb(); /* paranoia: read the CSB pointers from after the reset */ 4214 clflush(execlists->csb_write); 4215 mb(); 4216 4217 process_csb(engine); /* drain preemption events */ 4218 4219 /* Following the reset, we need to reload the CSB read/write pointers */ 4220 reset_csb_pointers(engine); 4221 4222 /* 4223 * Save the currently executing context, even if we completed 4224 * its request, it was still running at the time of the 4225 * reset and will have been clobbered. 4226 */ 4227 rq = active_context(engine, engine->execlists.reset_ccid); 4228 if (!rq) 4229 goto unwind; 4230 4231 ce = rq->context; 4232 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 4233 4234 if (i915_request_completed(rq)) { 4235 /* Idle context; tidy up the ring so we can restart afresh */ 4236 head = intel_ring_wrap(ce->ring, rq->tail); 4237 goto out_replay; 4238 } 4239 4240 /* We still have requests in-flight; the engine should be active */ 4241 GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); 4242 4243 /* Context has requests still in-flight; it should not be idle! */ 4244 GEM_BUG_ON(i915_active_is_idle(&ce->active)); 4245 4246 rq = active_request(ce->timeline, rq); 4247 head = intel_ring_wrap(ce->ring, rq->head); 4248 GEM_BUG_ON(head == ce->ring->tail); 4249 4250 /* 4251 * If this request hasn't started yet, e.g. it is waiting on a 4252 * semaphore, we need to avoid skipping the request or else we 4253 * break the signaling chain. However, if the context is corrupt 4254 * the request will not restart and we will be stuck with a wedged 4255 * device. It is quite often the case that if we issue a reset 4256 * while the GPU is loading the context image, that the context 4257 * image becomes corrupt. 4258 * 4259 * Otherwise, if we have not started yet, the request should replay 4260 * perfectly and we do not need to flag the result as being erroneous. 4261 */ 4262 if (!i915_request_started(rq)) 4263 goto out_replay; 4264 4265 /* 4266 * If the request was innocent, we leave the request in the ELSP 4267 * and will try to replay it on restarting. The context image may 4268 * have been corrupted by the reset, in which case we may have 4269 * to service a new GPU hang, but more likely we can continue on 4270 * without impact. 4271 * 4272 * If the request was guilty, we presume the context is corrupt 4273 * and have to at least restore the RING register in the context 4274 * image back to the expected values to skip over the guilty request. 
4275 */ 4276 __i915_request_reset(rq, stalled); 4277 4278 /* 4279 * We want a simple context + ring to execute the breadcrumb update. 4280 * We cannot rely on the context being intact across the GPU hang, 4281 * so clear it and rebuild just what we need for the breadcrumb. 4282 * All pending requests for this context will be zapped, and any 4283 * future request will be after userspace has had the opportunity 4284 * to recreate its own state. 4285 */ 4286 out_replay: 4287 ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", 4288 head, ce->ring->tail); 4289 __execlists_reset_reg_state(ce, engine); 4290 __execlists_update_reg_state(ce, engine, head); 4291 ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ 4292 4293 unwind: 4294 /* Push back any incomplete requests for replay after the reset. */ 4295 cancel_port_requests(execlists); 4296 __unwind_incomplete_requests(engine); 4297 } 4298 4299 static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) 4300 { 4301 unsigned long flags; 4302 4303 ENGINE_TRACE(engine, "\n"); 4304 4305 spin_lock_irqsave(&engine->active.lock, flags); 4306 4307 __execlists_reset(engine, stalled); 4308 4309 spin_unlock_irqrestore(&engine->active.lock, flags); 4310 } 4311 4312 static void nop_submission_tasklet(unsigned long data) 4313 { 4314 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 4315 4316 /* The driver is wedged; don't process any more events. */ 4317 WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN); 4318 } 4319 4320 static void execlists_reset_cancel(struct intel_engine_cs *engine) 4321 { 4322 struct intel_engine_execlists * const execlists = &engine->execlists; 4323 struct i915_request *rq, *rn; 4324 struct rb_node *rb; 4325 unsigned long flags; 4326 4327 ENGINE_TRACE(engine, "\n"); 4328 4329 /* 4330 * Before we call engine->cancel_requests(), we should have exclusive 4331 * access to the submission state. This is arranged for us by the 4332 * caller disabling the interrupt generation, the tasklet and other 4333 * threads that may then access the same state, giving us a free hand 4334 * to reset state. However, we still need to let lockdep be aware that 4335 * we know this state may be accessed in hardirq context, so we 4336 * disable the irq around this manipulation and we want to keep 4337 * the spinlock focused on its duties and not accidentally conflate 4338 * coverage to the submission's irq state. (Similarly, although we 4339 * shouldn't need to disable irq around the manipulation of the 4340 * submission's irq state, we also wish to remind ourselves that 4341 * it is irq state.) 4342 */ 4343 spin_lock_irqsave(&engine->active.lock, flags); 4344 4345 __execlists_reset(engine, true); 4346 4347 /* Mark all executing requests as skipped. */ 4348 list_for_each_entry(rq, &engine->active.requests, sched.link) 4349 mark_eio(rq); 4350 4351 /* Flush the queued requests to the timeline list (for retiring). 
*/ 4352 while ((rb = rb_first_cached(&execlists->queue))) { 4353 struct i915_priolist *p = to_priolist(rb); 4354 int i; 4355 4356 priolist_for_each_request_consume(rq, rn, p, i) { 4357 mark_eio(rq); 4358 __i915_request_submit(rq); 4359 } 4360 4361 rb_erase_cached(&p->node, &execlists->queue); 4362 i915_priolist_free(p); 4363 } 4364 4365 /* On-hold requests will be flushed to the timeline upon their release */ 4366 list_for_each_entry(rq, &engine->active.hold, sched.link) 4367 mark_eio(rq); 4368 4369 /* Cancel all attached virtual engines */ 4370 while ((rb = rb_first_cached(&execlists->virtual))) { 4371 struct virtual_engine *ve = 4372 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 4373 4374 rb_erase_cached(rb, &execlists->virtual); 4375 RB_CLEAR_NODE(rb); 4376 4377 spin_lock(&ve->base.active.lock); 4378 rq = fetch_and_zero(&ve->request); 4379 if (rq) { 4380 mark_eio(rq); 4381 4382 rq->engine = engine; 4383 __i915_request_submit(rq); 4384 i915_request_put(rq); 4385 4386 ve->base.execlists.queue_priority_hint = INT_MIN; 4387 } 4388 spin_unlock(&ve->base.active.lock); 4389 } 4390 4391 /* Remaining _unready_ requests will be nop'ed when submitted */ 4392 4393 execlists->queue_priority_hint = INT_MIN; 4394 execlists->queue = RB_ROOT_CACHED; 4395 4396 GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); 4397 execlists->tasklet.func = nop_submission_tasklet; 4398 4399 spin_unlock_irqrestore(&engine->active.lock, flags); 4400 } 4401 4402 static void execlists_reset_finish(struct intel_engine_cs *engine) 4403 { 4404 struct intel_engine_execlists * const execlists = &engine->execlists; 4405 4406 /* 4407 * After a GPU reset, we may have requests to replay. Do so now while 4408 * we still have the forcewake to be sure that the GPU is not allowed 4409 * to sleep before we restart and reload a context. 4410 */ 4411 GEM_BUG_ON(!reset_in_progress(execlists)); 4412 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) 4413 execlists->tasklet.func(execlists->tasklet.data); 4414 4415 if (__tasklet_enable(&execlists->tasklet)) 4416 /* And kick in case we missed a new request submission. */ 4417 tasklet_hi_schedule(&execlists->tasklet); 4418 ENGINE_TRACE(engine, "depth->%d\n", 4419 atomic_read(&execlists->tasklet.count)); 4420 } 4421 4422 static int gen8_emit_bb_start_noarb(struct i915_request *rq, 4423 u64 offset, u32 len, 4424 const unsigned int flags) 4425 { 4426 u32 *cs; 4427 4428 cs = intel_ring_begin(rq, 4); 4429 if (IS_ERR(cs)) 4430 return PTR_ERR(cs); 4431 4432 /* 4433 * WaDisableCtxRestoreArbitration:bdw,chv 4434 * 4435 * We don't need to perform MI_ARB_ENABLE as often as we do (in 4436 * particular all the gens that do not need the w/a at all!), if we 4437 * took care to make sure that on every switch into this context 4438 * (both ordinary and for preemption) that arbitration was enabled 4439 * we would be fine. However, for gen8 there is another w/a that 4440 * requires us to not preempt inside GPGPU execution, so we keep 4441 * arbitration disabled for gen8 batches. Arbitration will be 4442 * re-enabled before we close the request 4443 * (engine->emit_fini_breadcrumb). 4444 */ 4445 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 4446 4447 /* FIXME(BDW+): Address space and security selectors. */ 4448 *cs++ = MI_BATCH_BUFFER_START_GEN8 | 4449 (flags & I915_DISPATCH_SECURE ?
0 : BIT(8)); 4450 *cs++ = lower_32_bits(offset); 4451 *cs++ = upper_32_bits(offset); 4452 4453 intel_ring_advance(rq, cs); 4454 4455 return 0; 4456 } 4457 4458 static int gen8_emit_bb_start(struct i915_request *rq, 4459 u64 offset, u32 len, 4460 const unsigned int flags) 4461 { 4462 u32 *cs; 4463 4464 cs = intel_ring_begin(rq, 6); 4465 if (IS_ERR(cs)) 4466 return PTR_ERR(cs); 4467 4468 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 4469 4470 *cs++ = MI_BATCH_BUFFER_START_GEN8 | 4471 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); 4472 *cs++ = lower_32_bits(offset); 4473 *cs++ = upper_32_bits(offset); 4474 4475 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 4476 *cs++ = MI_NOOP; 4477 4478 intel_ring_advance(rq, cs); 4479 4480 return 0; 4481 } 4482 4483 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) 4484 { 4485 ENGINE_WRITE(engine, RING_IMR, 4486 ~(engine->irq_enable_mask | engine->irq_keep_mask)); 4487 ENGINE_POSTING_READ(engine, RING_IMR); 4488 } 4489 4490 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) 4491 { 4492 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); 4493 } 4494 4495 static int gen8_emit_flush(struct i915_request *request, u32 mode) 4496 { 4497 u32 cmd, *cs; 4498 4499 cs = intel_ring_begin(request, 4); 4500 if (IS_ERR(cs)) 4501 return PTR_ERR(cs); 4502 4503 cmd = MI_FLUSH_DW + 1; 4504 4505 /* We always require a command barrier so that subsequent 4506 * commands, such as breadcrumb interrupts, are strictly ordered 4507 * wrt the contents of the write cache being flushed to memory 4508 * (and thus being coherent from the CPU). 4509 */ 4510 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 4511 4512 if (mode & EMIT_INVALIDATE) { 4513 cmd |= MI_INVALIDATE_TLB; 4514 if (request->engine->class == VIDEO_DECODE_CLASS) 4515 cmd |= MI_INVALIDATE_BSD; 4516 } 4517 4518 *cs++ = cmd; 4519 *cs++ = LRC_PPHWSP_SCRATCH_ADDR; 4520 *cs++ = 0; /* upper addr */ 4521 *cs++ = 0; /* value */ 4522 intel_ring_advance(request, cs); 4523 4524 return 0; 4525 } 4526 4527 static int gen8_emit_flush_render(struct i915_request *request, 4528 u32 mode) 4529 { 4530 bool vf_flush_wa = false, dc_flush_wa = false; 4531 u32 *cs, flags = 0; 4532 int len; 4533 4534 flags |= PIPE_CONTROL_CS_STALL; 4535 4536 if (mode & EMIT_FLUSH) { 4537 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 4538 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 4539 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 4540 flags |= PIPE_CONTROL_FLUSH_ENABLE; 4541 } 4542 4543 if (mode & EMIT_INVALIDATE) { 4544 flags |= PIPE_CONTROL_TLB_INVALIDATE; 4545 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 4546 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 4547 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 4548 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 4549 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 4550 flags |= PIPE_CONTROL_QW_WRITE; 4551 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4552 4553 /* 4554 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 4555 * pipe control. 
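* A NULL pipe control here simply means gen8_emit_pipe_control(cs, 0, 0), i.e. a PIPE_CONTROL with no flags set; it is emitted below whenever vf_flush_wa is true.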
4556 */ 4557 if (IS_GEN(request->engine->i915, 9)) 4558 vf_flush_wa = true; 4559 4560 /* WaForGAMHang:kbl */ 4561 if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0)) 4562 dc_flush_wa = true; 4563 } 4564 4565 len = 6; 4566 4567 if (vf_flush_wa) 4568 len += 6; 4569 4570 if (dc_flush_wa) 4571 len += 12; 4572 4573 cs = intel_ring_begin(request, len); 4574 if (IS_ERR(cs)) 4575 return PTR_ERR(cs); 4576 4577 if (vf_flush_wa) 4578 cs = gen8_emit_pipe_control(cs, 0, 0); 4579 4580 if (dc_flush_wa) 4581 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, 4582 0); 4583 4584 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4585 4586 if (dc_flush_wa) 4587 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); 4588 4589 intel_ring_advance(request, cs); 4590 4591 return 0; 4592 } 4593 4594 static int gen11_emit_flush_render(struct i915_request *request, 4595 u32 mode) 4596 { 4597 if (mode & EMIT_FLUSH) { 4598 u32 *cs; 4599 u32 flags = 0; 4600 4601 flags |= PIPE_CONTROL_CS_STALL; 4602 4603 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; 4604 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 4605 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 4606 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 4607 flags |= PIPE_CONTROL_FLUSH_ENABLE; 4608 flags |= PIPE_CONTROL_QW_WRITE; 4609 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4610 4611 cs = intel_ring_begin(request, 6); 4612 if (IS_ERR(cs)) 4613 return PTR_ERR(cs); 4614 4615 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4616 intel_ring_advance(request, cs); 4617 } 4618 4619 if (mode & EMIT_INVALIDATE) { 4620 u32 *cs; 4621 u32 flags = 0; 4622 4623 flags |= PIPE_CONTROL_CS_STALL; 4624 4625 flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; 4626 flags |= PIPE_CONTROL_TLB_INVALIDATE; 4627 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 4628 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 4629 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 4630 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 4631 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 4632 flags |= PIPE_CONTROL_QW_WRITE; 4633 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4634 4635 cs = intel_ring_begin(request, 6); 4636 if (IS_ERR(cs)) 4637 return PTR_ERR(cs); 4638 4639 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4640 intel_ring_advance(request, cs); 4641 } 4642 4643 return 0; 4644 } 4645 4646 static u32 preparser_disable(bool state) 4647 { 4648 return MI_ARB_CHECK | 1 << 8 | state; 4649 } 4650 4651 static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine) 4652 { 4653 static const i915_reg_t vd[] = { 4654 GEN12_VD0_AUX_NV, 4655 GEN12_VD1_AUX_NV, 4656 GEN12_VD2_AUX_NV, 4657 GEN12_VD3_AUX_NV, 4658 }; 4659 4660 static const i915_reg_t ve[] = { 4661 GEN12_VE0_AUX_NV, 4662 GEN12_VE1_AUX_NV, 4663 }; 4664 4665 if (engine->class == VIDEO_DECODE_CLASS) 4666 return vd[engine->instance]; 4667 4668 if (engine->class == VIDEO_ENHANCEMENT_CLASS) 4669 return ve[engine->instance]; 4670 4671 GEM_BUG_ON("unknown aux_inv_reg\n"); 4672 4673 return INVALID_MMIO_REG; 4674 } 4675 4676 static u32 * 4677 gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs) 4678 { 4679 *cs++ = MI_LOAD_REGISTER_IMM(1); 4680 *cs++ = i915_mmio_reg_offset(inv_reg); 4681 *cs++ = AUX_INV; 4682 *cs++ = MI_NOOP; 4683 4684 return cs; 4685 } 4686 4687 static int gen12_emit_flush_render(struct i915_request *request, 4688 u32 mode) 4689 { 4690 if (mode & EMIT_FLUSH) { 4691 u32 flags = 0; 4692 u32 *cs; 4693 4694 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; 4695 flags |= PIPE_CONTROL_FLUSH_L3; 4696 flags |= 
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 4697 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 4698 /* Wa_1409600907:tgl */ 4699 flags |= PIPE_CONTROL_DEPTH_STALL; 4700 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 4701 flags |= PIPE_CONTROL_FLUSH_ENABLE; 4702 4703 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4704 flags |= PIPE_CONTROL_QW_WRITE; 4705 4706 flags |= PIPE_CONTROL_CS_STALL; 4707 4708 cs = intel_ring_begin(request, 6); 4709 if (IS_ERR(cs)) 4710 return PTR_ERR(cs); 4711 4712 cs = gen12_emit_pipe_control(cs, 4713 PIPE_CONTROL0_HDC_PIPELINE_FLUSH, 4714 flags, LRC_PPHWSP_SCRATCH_ADDR); 4715 intel_ring_advance(request, cs); 4716 } 4717 4718 if (mode & EMIT_INVALIDATE) { 4719 u32 flags = 0; 4720 u32 *cs; 4721 4722 flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; 4723 flags |= PIPE_CONTROL_TLB_INVALIDATE; 4724 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 4725 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 4726 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 4727 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 4728 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 4729 4730 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4731 flags |= PIPE_CONTROL_QW_WRITE; 4732 4733 flags |= PIPE_CONTROL_CS_STALL; 4734 4735 cs = intel_ring_begin(request, 8 + 4); 4736 if (IS_ERR(cs)) 4737 return PTR_ERR(cs); 4738 4739 /* 4740 * Prevent the pre-parser from skipping past the TLB 4741 * invalidate and loading a stale page for the batch 4742 * buffer / request payload. 4743 */ 4744 *cs++ = preparser_disable(true); 4745 4746 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4747 4748 /* hsdes: 1809175790 */ 4749 cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs); 4750 4751 *cs++ = preparser_disable(false); 4752 intel_ring_advance(request, cs); 4753 } 4754 4755 return 0; 4756 } 4757 4758 static int gen12_emit_flush(struct i915_request *request, u32 mode) 4759 { 4760 intel_engine_mask_t aux_inv = 0; 4761 u32 cmd, *cs; 4762 4763 cmd = 4; 4764 if (mode & EMIT_INVALIDATE) 4765 cmd += 2; 4766 if (mode & EMIT_INVALIDATE) 4767 aux_inv = request->engine->mask & ~BIT(BCS0); 4768 if (aux_inv) 4769 cmd += 2 * hweight8(aux_inv) + 2; 4770 4771 cs = intel_ring_begin(request, cmd); 4772 if (IS_ERR(cs)) 4773 return PTR_ERR(cs); 4774 4775 if (mode & EMIT_INVALIDATE) 4776 *cs++ = preparser_disable(true); 4777 4778 cmd = MI_FLUSH_DW + 1; 4779 4780 /* We always require a command barrier so that subsequent 4781 * commands, such as breadcrumb interrupts, are strictly ordered 4782 * wrt the contents of the write cache being flushed to memory 4783 * (and thus being coherent from the CPU). 
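* Here that barrier is a post of a zero dword to the per-context PPHWSP scratch slot: STORE_INDEX makes the address below an offset into the hardware status page rather than a GGTT address.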
4784 */ 4785 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 4786 4787 if (mode & EMIT_INVALIDATE) { 4788 cmd |= MI_INVALIDATE_TLB; 4789 if (request->engine->class == VIDEO_DECODE_CLASS) 4790 cmd |= MI_INVALIDATE_BSD; 4791 } 4792 4793 *cs++ = cmd; 4794 *cs++ = LRC_PPHWSP_SCRATCH_ADDR; 4795 *cs++ = 0; /* upper addr */ 4796 *cs++ = 0; /* value */ 4797 4798 if (aux_inv) { /* hsdes: 1809175790 */ 4799 struct intel_engine_cs *engine; 4800 unsigned int tmp; 4801 4802 *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv)); 4803 for_each_engine_masked(engine, request->engine->gt, 4804 aux_inv, tmp) { 4805 *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine)); 4806 *cs++ = AUX_INV; 4807 } 4808 *cs++ = MI_NOOP; 4809 } 4810 4811 if (mode & EMIT_INVALIDATE) 4812 *cs++ = preparser_disable(false); 4813 4814 intel_ring_advance(request, cs); 4815 4816 return 0; 4817 } 4818 4819 static void assert_request_valid(struct i915_request *rq) 4820 { 4821 struct intel_ring *ring __maybe_unused = rq->ring; 4822 4823 /* Can we unwind this request without appearing to go forwards? */ 4824 GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); 4825 } 4826 4827 /* 4828 * Reserve space for 2 NOOPs at the end of each request to be 4829 * used as a workaround for not being allowed to do lite 4830 * restore with HEAD==TAIL (WaIdleLiteRestore). 4831 */ 4832 static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) 4833 { 4834 /* Ensure there's always at least one preemption point per-request. */ 4835 *cs++ = MI_ARB_CHECK; 4836 *cs++ = MI_NOOP; 4837 request->wa_tail = intel_ring_offset(request, cs); 4838 4839 /* Check that entire request is less than half the ring */ 4840 assert_request_valid(request); 4841 4842 return cs; 4843 } 4844 4845 static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) 4846 { 4847 *cs++ = MI_SEMAPHORE_WAIT | 4848 MI_SEMAPHORE_GLOBAL_GTT | 4849 MI_SEMAPHORE_POLL | 4850 MI_SEMAPHORE_SAD_EQ_SDD; 4851 *cs++ = 0; 4852 *cs++ = intel_hws_preempt_address(request->engine); 4853 *cs++ = 0; 4854 4855 return cs; 4856 } 4857 4858 static __always_inline u32* 4859 gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) 4860 { 4861 *cs++ = MI_USER_INTERRUPT; 4862 4863 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 4864 if (intel_engine_has_semaphores(request->engine)) 4865 cs = emit_preempt_busywait(request, cs); 4866 4867 request->tail = intel_ring_offset(request, cs); 4868 assert_ring_tail_valid(request->ring, request->tail); 4869 4870 return gen8_emit_wa_tail(request, cs); 4871 } 4872 4873 static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs) 4874 { 4875 u32 addr = i915_request_active_timeline(request)->hwsp_offset; 4876 4877 return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0); 4878 } 4879 4880 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) 4881 { 4882 return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); 4883 } 4884 4885 static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 4886 { 4887 cs = gen8_emit_pipe_control(cs, 4888 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 4889 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 4890 PIPE_CONTROL_DC_FLUSH_ENABLE, 4891 0); 4892 4893 /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ 4894 cs = gen8_emit_ggtt_write_rcs(cs, 4895 request->fence.seqno, 4896 i915_request_active_timeline(request)->hwsp_offset, 4897 PIPE_CONTROL_FLUSH_ENABLE | 4898 PIPE_CONTROL_CS_STALL); 4899 4900 return gen8_emit_fini_breadcrumb_tail(request, cs); 4901 
} 4902 4903 static u32 * 4904 gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 4905 { 4906 cs = gen8_emit_ggtt_write_rcs(cs, 4907 request->fence.seqno, 4908 i915_request_active_timeline(request)->hwsp_offset, 4909 PIPE_CONTROL_CS_STALL | 4910 PIPE_CONTROL_TILE_CACHE_FLUSH | 4911 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 4912 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 4913 PIPE_CONTROL_DC_FLUSH_ENABLE | 4914 PIPE_CONTROL_FLUSH_ENABLE); 4915 4916 return gen8_emit_fini_breadcrumb_tail(request, cs); 4917 } 4918 4919 /* 4920 * Note that the CS instruction pre-parser will not stall on the breadcrumb 4921 * flush and will continue pre-fetching the instructions after it before the 4922 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at 4923 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble 4924 * of the next request before the memory has been flushed, we're guaranteed that 4925 * we won't access the batch itself too early. 4926 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, 4927 * so, if the current request is modifying an instruction in the next request on 4928 * the same intel_context, we might pre-fetch and then execute the pre-update 4929 * instruction. To avoid this, the users of self-modifying code should either 4930 * disable the parser around the code emitting the memory writes, via a new flag 4931 * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For 4932 * the in-kernel use-cases we've opted to use a separate context, see 4933 * reloc_gpu() as an example. 4934 * All the above applies only to the instructions themselves. Non-inline data 4935 * used by the instructions is not pre-fetched. 4936 */ 4937 4938 static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs) 4939 { 4940 *cs++ = MI_SEMAPHORE_WAIT_TOKEN | 4941 MI_SEMAPHORE_GLOBAL_GTT | 4942 MI_SEMAPHORE_POLL | 4943 MI_SEMAPHORE_SAD_EQ_SDD; 4944 *cs++ = 0; 4945 *cs++ = intel_hws_preempt_address(request->engine); 4946 *cs++ = 0; 4947 *cs++ = 0; 4948 *cs++ = MI_NOOP; 4949 4950 return cs; 4951 } 4952 4953 static __always_inline u32* 4954 gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) 4955 { 4956 *cs++ = MI_USER_INTERRUPT; 4957 4958 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 4959 if (intel_engine_has_semaphores(request->engine)) 4960 cs = gen12_emit_preempt_busywait(request, cs); 4961 4962 request->tail = intel_ring_offset(request, cs); 4963 assert_ring_tail_valid(request->ring, request->tail); 4964 4965 return gen8_emit_wa_tail(request, cs); 4966 } 4967 4968 static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) 4969 { 4970 return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); 4971 } 4972 4973 static u32 * 4974 gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 4975 { 4976 cs = gen12_emit_ggtt_write_rcs(cs, 4977 request->fence.seqno, 4978 i915_request_active_timeline(request)->hwsp_offset, 4979 PIPE_CONTROL0_HDC_PIPELINE_FLUSH, 4980 PIPE_CONTROL_CS_STALL | 4981 PIPE_CONTROL_TILE_CACHE_FLUSH | 4982 PIPE_CONTROL_FLUSH_L3 | 4983 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 4984 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 4985 /* Wa_1409600907:tgl */ 4986 PIPE_CONTROL_DEPTH_STALL | 4987 PIPE_CONTROL_DC_FLUSH_ENABLE | 4988 PIPE_CONTROL_FLUSH_ENABLE); 4989 4990 return gen12_emit_fini_breadcrumb_tail(request, cs); 4991 } 4992 4993 static void execlists_park(struct intel_engine_cs *engine) 4994 { 4995 cancel_timer(&engine->execlists.timer); 4996 
cancel_timer(&engine->execlists.preempt); 4997 } 4998 4999 void intel_execlists_set_default_submission(struct intel_engine_cs *engine) 5000 { 5001 engine->submit_request = execlists_submit_request; 5002 engine->schedule = i915_schedule; 5003 engine->execlists.tasklet.func = execlists_submission_tasklet; 5004 5005 engine->reset.prepare = execlists_reset_prepare; 5006 engine->reset.rewind = execlists_reset_rewind; 5007 engine->reset.cancel = execlists_reset_cancel; 5008 engine->reset.finish = execlists_reset_finish; 5009 5010 engine->park = execlists_park; 5011 engine->unpark = NULL; 5012 5013 engine->flags |= I915_ENGINE_SUPPORTS_STATS; 5014 if (!intel_vgpu_active(engine->i915)) { 5015 engine->flags |= I915_ENGINE_HAS_SEMAPHORES; 5016 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { 5017 engine->flags |= I915_ENGINE_HAS_PREEMPTION; 5018 if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) 5019 engine->flags |= I915_ENGINE_HAS_TIMESLICES; 5020 } 5021 } 5022 5023 if (INTEL_GEN(engine->i915) >= 12) 5024 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; 5025 5026 if (intel_engine_has_preemption(engine)) 5027 engine->emit_bb_start = gen8_emit_bb_start; 5028 else 5029 engine->emit_bb_start = gen8_emit_bb_start_noarb; 5030 } 5031 5032 static void execlists_shutdown(struct intel_engine_cs *engine) 5033 { 5034 /* Synchronise with residual timers and any softirq they raise */ 5035 del_timer_sync(&engine->execlists.timer); 5036 del_timer_sync(&engine->execlists.preempt); 5037 tasklet_kill(&engine->execlists.tasklet); 5038 } 5039 5040 static void execlists_release(struct intel_engine_cs *engine) 5041 { 5042 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ 5043 5044 execlists_shutdown(engine); 5045 5046 intel_engine_cleanup_common(engine); 5047 lrc_destroy_wa_ctx(engine); 5048 } 5049 5050 static void 5051 logical_ring_default_vfuncs(struct intel_engine_cs *engine) 5052 { 5053 /* Default vfuncs which can be overridden by each engine. */ 5054 5055 engine->resume = execlists_resume; 5056 5057 engine->cops = &execlists_context_ops; 5058 engine->request_alloc = execlists_request_alloc; 5059 5060 engine->emit_flush = gen8_emit_flush; 5061 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; 5062 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; 5063 if (INTEL_GEN(engine->i915) >= 12) { 5064 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb; 5065 engine->emit_flush = gen12_emit_flush; 5066 } 5067 engine->set_default_submission = intel_execlists_set_default_submission; 5068 5069 if (INTEL_GEN(engine->i915) < 11) { 5070 engine->irq_enable = gen8_logical_ring_enable_irq; 5071 engine->irq_disable = gen8_logical_ring_disable_irq; 5072 } else { 5073 /* 5074 * TODO: On Gen11 interrupt masks need to be clear 5075 * to allow C6 entry. Keep interrupts enabled 5076 * and take the hit of generating extra interrupts 5077 * until a more refined solution exists.
5078 */ 5079 } 5080 } 5081 5082 static inline void 5083 logical_ring_default_irqs(struct intel_engine_cs *engine) 5084 { 5085 unsigned int shift = 0; 5086 5087 if (INTEL_GEN(engine->i915) < 11) { 5088 const u8 irq_shifts[] = { 5089 [RCS0] = GEN8_RCS_IRQ_SHIFT, 5090 [BCS0] = GEN8_BCS_IRQ_SHIFT, 5091 [VCS0] = GEN8_VCS0_IRQ_SHIFT, 5092 [VCS1] = GEN8_VCS1_IRQ_SHIFT, 5093 [VECS0] = GEN8_VECS_IRQ_SHIFT, 5094 }; 5095 5096 shift = irq_shifts[engine->id]; 5097 } 5098 5099 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 5100 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 5101 engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; 5102 engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; 5103 } 5104 5105 static void rcs_submission_override(struct intel_engine_cs *engine) 5106 { 5107 switch (INTEL_GEN(engine->i915)) { 5108 case 12: 5109 engine->emit_flush = gen12_emit_flush_render; 5110 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; 5111 break; 5112 case 11: 5113 engine->emit_flush = gen11_emit_flush_render; 5114 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; 5115 break; 5116 default: 5117 engine->emit_flush = gen8_emit_flush_render; 5118 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; 5119 break; 5120 } 5121 } 5122 5123 int intel_execlists_submission_setup(struct intel_engine_cs *engine) 5124 { 5125 struct intel_engine_execlists * const execlists = &engine->execlists; 5126 struct drm_i915_private *i915 = engine->i915; 5127 struct intel_uncore *uncore = engine->uncore; 5128 u32 base = engine->mmio_base; 5129 5130 tasklet_init(&engine->execlists.tasklet, 5131 execlists_submission_tasklet, (unsigned long)engine); 5132 timer_setup(&engine->execlists.timer, execlists_timeslice, 0); 5133 timer_setup(&engine->execlists.preempt, execlists_preempt, 0); 5134 5135 logical_ring_default_vfuncs(engine); 5136 logical_ring_default_irqs(engine); 5137 5138 if (engine->class == RENDER_CLASS) 5139 rcs_submission_override(engine); 5140 5141 if (intel_init_workaround_bb(engine)) 5142 /* 5143 * We continue even if we fail to initialize WA batch 5144 * because we only expect rare glitches but nothing 5145 * critical to prevent us from using GPU 5146 */ 5147 drm_err(&i915->drm, "WA batch buffer initialization failed\n"); 5148 5149 if (HAS_LOGICAL_RING_ELSQ(i915)) { 5150 execlists->submit_reg = uncore->regs + 5151 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); 5152 execlists->ctrl_reg = uncore->regs + 5153 i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); 5154 } else { 5155 execlists->submit_reg = uncore->regs + 5156 i915_mmio_reg_offset(RING_ELSP(base)); 5157 } 5158 5159 execlists->csb_status = 5160 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 5161 5162 execlists->csb_write = 5163 &engine->status_page.addr[intel_hws_csb_write_index(i915)]; 5164 5165 if (INTEL_GEN(i915) < 11) 5166 execlists->csb_size = GEN8_CSB_ENTRIES; 5167 else 5168 execlists->csb_size = GEN11_CSB_ENTRIES; 5169 5170 if (INTEL_GEN(engine->i915) >= 11) { 5171 execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); 5172 execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); 5173 } 5174 5175 /* Finally, take ownership and responsibility for cleanup! 
*/ 5176 engine->sanitize = execlists_sanitize; 5177 engine->release = execlists_release; 5178 5179 return 0; 5180 } 5181 5182 static void init_common_reg_state(u32 * const regs, 5183 const struct intel_engine_cs *engine, 5184 const struct intel_ring *ring, 5185 bool inhibit) 5186 { 5187 u32 ctl; 5188 5189 ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); 5190 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 5191 if (inhibit) 5192 ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; 5193 if (INTEL_GEN(engine->i915) < 11) 5194 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | 5195 CTX_CTRL_RS_CTX_ENABLE); 5196 regs[CTX_CONTEXT_CONTROL] = ctl; 5197 5198 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; 5199 regs[CTX_TIMESTAMP] = 0; 5200 } 5201 5202 static void init_wa_bb_reg_state(u32 * const regs, 5203 const struct intel_engine_cs *engine) 5204 { 5205 const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; 5206 5207 if (wa_ctx->per_ctx.size) { 5208 const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); 5209 5210 GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1); 5211 regs[lrc_ring_wa_bb_per_ctx(engine) + 1] = 5212 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; 5213 } 5214 5215 if (wa_ctx->indirect_ctx.size) { 5216 lrc_ring_setup_indirect_ctx(regs, engine, 5217 i915_ggtt_offset(wa_ctx->vma) + 5218 wa_ctx->indirect_ctx.offset, 5219 wa_ctx->indirect_ctx.size); 5220 } 5221 } 5222 5223 static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt) 5224 { 5225 if (i915_vm_is_4lvl(&ppgtt->vm)) { 5226 /* 64b PPGTT (48bit canonical) 5227 * PDP0_DESCRIPTOR contains the base address to PML4 and 5228 * other PDP Descriptors are ignored. 5229 */ 5230 ASSIGN_CTX_PML4(ppgtt, regs); 5231 } else { 5232 ASSIGN_CTX_PDP(ppgtt, regs, 3); 5233 ASSIGN_CTX_PDP(ppgtt, regs, 2); 5234 ASSIGN_CTX_PDP(ppgtt, regs, 1); 5235 ASSIGN_CTX_PDP(ppgtt, regs, 0); 5236 } 5237 } 5238 5239 static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) 5240 { 5241 if (i915_is_ggtt(vm)) 5242 return i915_vm_to_ggtt(vm)->alias; 5243 else 5244 return i915_vm_to_ppgtt(vm); 5245 } 5246 5247 static void execlists_init_reg_state(u32 *regs, 5248 const struct intel_context *ce, 5249 const struct intel_engine_cs *engine, 5250 const struct intel_ring *ring, 5251 bool inhibit) 5252 { 5253 /* 5254 * A context is actually a big batch buffer with several 5255 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The 5256 * values we are setting here are only for the first context restore: 5257 * on a subsequent save, the GPU will recreate this batchbuffer with new 5258 * values (including all the missing MI_LOAD_REGISTER_IMM commands that 5259 * we are not initializing here). 5260 * 5261 * Must keep consistent with virtual_update_register_offsets(). 
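* set_offsets() below lays down the MI_LOAD_REGISTER_IMM headers and the register offsets taken from reg_offsets(engine); the init_*_reg_state() helpers that follow only fill in the few values we care about up front.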
5262 */ 5263 set_offsets(regs, reg_offsets(engine), engine, inhibit); 5264 5265 init_common_reg_state(regs, engine, ring, inhibit); 5266 init_ppgtt_reg_state(regs, vm_alias(ce->vm)); 5267 5268 init_wa_bb_reg_state(regs, engine); 5269 5270 __reset_stop_ring(regs, engine); 5271 } 5272 5273 static int 5274 populate_lr_context(struct intel_context *ce, 5275 struct drm_i915_gem_object *ctx_obj, 5276 struct intel_engine_cs *engine, 5277 struct intel_ring *ring) 5278 { 5279 bool inhibit = true; 5280 void *vaddr; 5281 5282 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); 5283 if (IS_ERR(vaddr)) { 5284 drm_dbg(&engine->i915->drm, "Could not map object pages!\n"); 5285 return PTR_ERR(vaddr); 5286 } 5287 5288 set_redzone(vaddr, engine); 5289 5290 if (engine->default_state) { 5291 shmem_read(engine->default_state, 0, 5292 vaddr, engine->context_size); 5293 __set_bit(CONTEXT_VALID_BIT, &ce->flags); 5294 inhibit = false; 5295 } 5296 5297 /* Clear the ppHWSP (inc. per-context counters) */ 5298 memset(vaddr, 0, PAGE_SIZE); 5299 5300 /* 5301 * The second page of the context object contains some registers which 5302 * must be set up prior to the first execution. 5303 */ 5304 execlists_init_reg_state(vaddr + LRC_STATE_OFFSET, 5305 ce, engine, ring, inhibit); 5306 5307 __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); 5308 i915_gem_object_unpin_map(ctx_obj); 5309 return 0; 5310 } 5311 5312 static struct intel_timeline *pinned_timeline(struct intel_context *ce) 5313 { 5314 struct intel_timeline *tl = fetch_and_zero(&ce->timeline); 5315 5316 return intel_timeline_create_from_engine(ce->engine, 5317 page_unmask_bits(tl)); 5318 } 5319 5320 static int __execlists_context_alloc(struct intel_context *ce, 5321 struct intel_engine_cs *engine) 5322 { 5323 struct drm_i915_gem_object *ctx_obj; 5324 struct intel_ring *ring; 5325 struct i915_vma *vma; 5326 u32 context_size; 5327 int ret; 5328 5329 GEM_BUG_ON(ce->state); 5330 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); 5331 5332 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 5333 context_size += I915_GTT_PAGE_SIZE; /* for redzone */ 5334 5335 if (INTEL_GEN(engine->i915) == 12) { 5336 ce->wa_bb_page = context_size / PAGE_SIZE; 5337 context_size += PAGE_SIZE; 5338 } 5339 5340 ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); 5341 if (IS_ERR(ctx_obj)) 5342 return PTR_ERR(ctx_obj); 5343 5344 vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); 5345 if (IS_ERR(vma)) { 5346 ret = PTR_ERR(vma); 5347 goto error_deref_obj; 5348 } 5349 5350 if (!page_mask_bits(ce->timeline)) { 5351 struct intel_timeline *tl; 5352 5353 /* 5354 * Use the static global HWSP for the kernel context, and 5355 * a dynamically allocated cacheline for everyone else. 
5356 */ 5357 if (unlikely(ce->timeline)) 5358 tl = pinned_timeline(ce); 5359 else 5360 tl = intel_timeline_create(engine->gt); 5361 if (IS_ERR(tl)) { 5362 ret = PTR_ERR(tl); 5363 goto error_deref_obj; 5364 } 5365 5366 ce->timeline = tl; 5367 } 5368 5369 ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); 5370 if (IS_ERR(ring)) { 5371 ret = PTR_ERR(ring); 5372 goto error_deref_obj; 5373 } 5374 5375 ret = populate_lr_context(ce, ctx_obj, engine, ring); 5376 if (ret) { 5377 drm_dbg(&engine->i915->drm, 5378 "Failed to populate LRC: %d\n", ret); 5379 goto error_ring_free; 5380 } 5381 5382 ce->ring = ring; 5383 ce->state = vma; 5384 5385 return 0; 5386 5387 error_ring_free: 5388 intel_ring_put(ring); 5389 error_deref_obj: 5390 i915_gem_object_put(ctx_obj); 5391 return ret; 5392 } 5393 5394 static struct list_head *virtual_queue(struct virtual_engine *ve) 5395 { 5396 return &ve->base.execlists.default_priolist.requests[0]; 5397 } 5398 5399 static void virtual_context_destroy(struct kref *kref) 5400 { 5401 struct virtual_engine *ve = 5402 container_of(kref, typeof(*ve), context.ref); 5403 unsigned int n; 5404 5405 GEM_BUG_ON(!list_empty(virtual_queue(ve))); 5406 GEM_BUG_ON(ve->request); 5407 GEM_BUG_ON(ve->context.inflight); 5408 5409 for (n = 0; n < ve->num_siblings; n++) { 5410 struct intel_engine_cs *sibling = ve->siblings[n]; 5411 struct rb_node *node = &ve->nodes[sibling->id].rb; 5412 unsigned long flags; 5413 5414 if (RB_EMPTY_NODE(node)) 5415 continue; 5416 5417 spin_lock_irqsave(&sibling->active.lock, flags); 5418 5419 /* Detachment is lazily performed in the execlists tasklet */ 5420 if (!RB_EMPTY_NODE(node)) 5421 rb_erase_cached(node, &sibling->execlists.virtual); 5422 5423 spin_unlock_irqrestore(&sibling->active.lock, flags); 5424 } 5425 GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); 5426 5427 if (ve->context.state) 5428 __execlists_context_fini(&ve->context); 5429 intel_context_fini(&ve->context); 5430 5431 intel_engine_free_request_pool(&ve->base); 5432 5433 kfree(ve->bonds); 5434 kfree(ve); 5435 } 5436 5437 static void virtual_engine_initial_hint(struct virtual_engine *ve) 5438 { 5439 int swp; 5440 5441 /* 5442 * Pick a random sibling on starting to help spread the load around. 5443 * 5444 * New contexts are typically created with exactly the same order 5445 * of siblings, and often started in batches. Due to the way we iterate 5446 * the array of sibling when submitting requests, sibling[0] is 5447 * prioritised for dequeuing. If we make sure that sibling[0] is fairly 5448 * randomised across the system, we also help spread the load by the 5449 * first engine we inspect being different each time. 5450 * 5451 * NB This does not force us to execute on this engine, it will just 5452 * typically be the first we inspect for submission. 
5453 */ 5454 swp = prandom_u32_max(ve->num_siblings); 5455 if (swp) 5456 swap(ve->siblings[swp], ve->siblings[0]); 5457 } 5458 5459 static int virtual_context_alloc(struct intel_context *ce) 5460 { 5461 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5462 5463 return __execlists_context_alloc(ce, ve->siblings[0]); 5464 } 5465 5466 static int virtual_context_pin(struct intel_context *ce, void *vaddr) 5467 { 5468 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5469 5470 /* Note: we must use a real engine class for setting up reg state */ 5471 return __execlists_context_pin(ce, ve->siblings[0], vaddr); 5472 } 5473 5474 static void virtual_context_enter(struct intel_context *ce) 5475 { 5476 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5477 unsigned int n; 5478 5479 for (n = 0; n < ve->num_siblings; n++) 5480 intel_engine_pm_get(ve->siblings[n]); 5481 5482 intel_timeline_enter(ce->timeline); 5483 } 5484 5485 static void virtual_context_exit(struct intel_context *ce) 5486 { 5487 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5488 unsigned int n; 5489 5490 intel_timeline_exit(ce->timeline); 5491 5492 for (n = 0; n < ve->num_siblings; n++) 5493 intel_engine_pm_put(ve->siblings[n]); 5494 } 5495 5496 static const struct intel_context_ops virtual_context_ops = { 5497 .alloc = virtual_context_alloc, 5498 5499 .pre_pin = execlists_context_pre_pin, 5500 .pin = virtual_context_pin, 5501 .unpin = execlists_context_unpin, 5502 .post_unpin = execlists_context_post_unpin, 5503 5504 .enter = virtual_context_enter, 5505 .exit = virtual_context_exit, 5506 5507 .destroy = virtual_context_destroy, 5508 }; 5509 5510 static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) 5511 { 5512 struct i915_request *rq; 5513 intel_engine_mask_t mask; 5514 5515 rq = READ_ONCE(ve->request); 5516 if (!rq) 5517 return 0; 5518 5519 /* The rq is ready for submission; rq->execution_mask is now stable. 
*/ 5520 mask = rq->execution_mask; 5521 if (unlikely(!mask)) { 5522 /* Invalid selection, submit to a random engine in error */ 5523 i915_request_set_error_once(rq, -ENODEV); 5524 mask = ve->siblings[0]->mask; 5525 } 5526 5527 ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n", 5528 rq->fence.context, rq->fence.seqno, 5529 mask, ve->base.execlists.queue_priority_hint); 5530 5531 return mask; 5532 } 5533 5534 static void virtual_submission_tasklet(unsigned long data) 5535 { 5536 struct virtual_engine * const ve = (struct virtual_engine *)data; 5537 const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint); 5538 intel_engine_mask_t mask; 5539 unsigned int n; 5540 5541 rcu_read_lock(); 5542 mask = virtual_submission_mask(ve); 5543 rcu_read_unlock(); 5544 if (unlikely(!mask)) 5545 return; 5546 5547 local_irq_disable(); 5548 for (n = 0; n < ve->num_siblings; n++) { 5549 struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); 5550 struct ve_node * const node = &ve->nodes[sibling->id]; 5551 struct rb_node **parent, *rb; 5552 bool first; 5553 5554 if (!READ_ONCE(ve->request)) 5555 break; /* already handled by a sibling's tasklet */ 5556 5557 if (unlikely(!(mask & sibling->mask))) { 5558 if (!RB_EMPTY_NODE(&node->rb)) { 5559 spin_lock(&sibling->active.lock); 5560 rb_erase_cached(&node->rb, 5561 &sibling->execlists.virtual); 5562 RB_CLEAR_NODE(&node->rb); 5563 spin_unlock(&sibling->active.lock); 5564 } 5565 continue; 5566 } 5567 5568 spin_lock(&sibling->active.lock); 5569 5570 if (!RB_EMPTY_NODE(&node->rb)) { 5571 /* 5572 * Cheat and avoid rebalancing the tree if we can 5573 * reuse this node in situ. 5574 */ 5575 first = rb_first_cached(&sibling->execlists.virtual) == 5576 &node->rb; 5577 if (prio == node->prio || (prio > node->prio && first)) 5578 goto submit_engine; 5579 5580 rb_erase_cached(&node->rb, &sibling->execlists.virtual); 5581 } 5582 5583 rb = NULL; 5584 first = true; 5585 parent = &sibling->execlists.virtual.rb_root.rb_node; 5586 while (*parent) { 5587 struct ve_node *other; 5588 5589 rb = *parent; 5590 other = rb_entry(rb, typeof(*other), rb); 5591 if (prio > other->prio) { 5592 parent = &rb->rb_left; 5593 } else { 5594 parent = &rb->rb_right; 5595 first = false; 5596 } 5597 } 5598 5599 rb_link_node(&node->rb, rb, parent); 5600 rb_insert_color_cached(&node->rb, 5601 &sibling->execlists.virtual, 5602 first); 5603 5604 submit_engine: 5605 GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); 5606 node->prio = prio; 5607 if (first && prio > sibling->execlists.queue_priority_hint) 5608 tasklet_hi_schedule(&sibling->execlists.tasklet); 5609 5610 spin_unlock(&sibling->active.lock); 5611 } 5612 local_irq_enable(); 5613 } 5614 5615 static void virtual_submit_request(struct i915_request *rq) 5616 { 5617 struct virtual_engine *ve = to_virtual_engine(rq->engine); 5618 struct i915_request *old; 5619 unsigned long flags; 5620 5621 ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n", 5622 rq->fence.context, 5623 rq->fence.seqno); 5624 5625 GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); 5626 5627 spin_lock_irqsave(&ve->base.active.lock, flags); 5628 5629 old = ve->request; 5630 if (old) { /* background completion event from preempt-to-busy */ 5631 GEM_BUG_ON(!i915_request_completed(old)); 5632 __i915_request_submit(old); 5633 i915_request_put(old); 5634 } 5635 5636 if (i915_request_completed(rq)) { 5637 __i915_request_submit(rq); 5638 5639 ve->base.execlists.queue_priority_hint = INT_MIN; 5640 ve->request = NULL; 5641 } else { 5642 ve->base.execlists.queue_priority_hint = rq_prio(rq); 
static struct ve_bond *
virtual_find_bond(struct virtual_engine *ve,
		  const struct intel_engine_cs *master)
{
	int i;

	for (i = 0; i < ve->num_bonds; i++) {
		if (ve->bonds[i].master == master)
			return &ve->bonds[i];
	}

	return NULL;
}

static void
virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
{
	struct virtual_engine *ve = to_virtual_engine(rq->engine);
	intel_engine_mask_t allowed, exec;
	struct ve_bond *bond;

	allowed = ~to_request(signal)->engine->mask;

	bond = virtual_find_bond(ve, to_request(signal)->engine);
	if (bond)
		allowed &= bond->sibling_mask;

	/* Restrict the bonded request to run on only the available engines */
	exec = READ_ONCE(rq->execution_mask);
	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
		;

	/* Prevent the master from being re-run on the bonded engines */
	to_request(signal)->execution_mask &= ~allowed;
}

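/*
 * intel_execlists_create_virtual() builds one logical context whose
 * requests are load-balanced across the @count physical siblings. A
 * minimal usage sketch (the engine pointers are purely illustrative and
 * real callers must drop the context reference when done):
 *
 *	struct intel_engine_cs *siblings[] = { vcs0, vcs1 };
 *	struct intel_context *ce;
 *
 *	ce = intel_execlists_create_virtual(siblings, ARRAY_SIZE(siblings));
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 * count == 1 degenerates into a plain intel_context_create() on the sole
 * sibling, count == 0 is rejected with -EINVAL, and all siblings must be
 * distinct, execlists-backed engines of the same class.
 */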
struct intel_context *
intel_execlists_create_virtual(struct intel_engine_cs **siblings,
			       unsigned int count)
{
	struct virtual_engine *ve;
	unsigned int n;
	int err;

	if (count == 0)
		return ERR_PTR(-EINVAL);

	if (count == 1)
		return intel_context_create(siblings[0]);

	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
	if (!ve)
		return ERR_PTR(-ENOMEM);

	ve->base.i915 = siblings[0]->i915;
	ve->base.gt = siblings[0]->gt;
	ve->base.uncore = siblings[0]->uncore;
	ve->base.id = -1;

	ve->base.class = OTHER_CLASS;
	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;

	/*
	 * The decision on whether to submit a request using semaphores
	 * depends on the saturated state of the engine. We only compute
	 * this during HW submission of the request, and we need this
	 * state to be globally applied to all requests being submitted
	 * to this engine. Virtual engines encompass more than one physical
	 * engine and so we cannot accurately tell in advance if one of those
	 * engines is already saturated and so cannot afford to use a semaphore
	 * and be pessimized in priority for doing so -- if we are the only
	 * context using semaphores after all other clients have stopped, we
	 * will be starved on the saturated system. Such a global switch for
	 * semaphores is less than ideal, but alas is the current compromise.
	 */
	ve->base.saturated = ALL_ENGINES;

	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");

	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
	intel_engine_init_execlists(&ve->base);

	ve->base.cops = &virtual_context_ops;
	ve->base.request_alloc = execlists_request_alloc;

	ve->base.schedule = i915_schedule;
	ve->base.submit_request = virtual_submit_request;
	ve->base.bond_execute = virtual_bond_execute;

	INIT_LIST_HEAD(virtual_queue(ve));
	ve->base.execlists.queue_priority_hint = INT_MIN;
	tasklet_init(&ve->base.execlists.tasklet,
		     virtual_submission_tasklet,
		     (unsigned long)ve);

	intel_context_init(&ve->context, &ve->base);

	ve->base.breadcrumbs = intel_breadcrumbs_create(NULL);
	if (!ve->base.breadcrumbs) {
		err = -ENOMEM;
		goto err_put;
	}

	for (n = 0; n < count; n++) {
		struct intel_engine_cs *sibling = siblings[n];

		GEM_BUG_ON(!is_power_of_2(sibling->mask));
		if (sibling->mask & ve->base.mask) {
			DRM_DEBUG("duplicate %s entry in load balancer\n",
				  sibling->name);
			err = -EINVAL;
			goto err_put;
		}

		/*
		 * The virtual engine implementation is tightly coupled to
		 * the execlists backend -- we push out requests directly
		 * into a tree inside each physical engine. We could support
		 * layering if we handle cloning of the requests and
		 * submitting a copy into each backend.
		 */
		if (sibling->execlists.tasklet.func !=
		    execlists_submission_tasklet) {
			err = -ENODEV;
			goto err_put;
		}

		GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb));
		RB_CLEAR_NODE(&ve->nodes[sibling->id].rb);

		ve->siblings[ve->num_siblings++] = sibling;
		ve->base.mask |= sibling->mask;

		/*
		 * All physical engines must be compatible for their emission
		 * functions (as we build the instructions during request
		 * construction and do not alter them before submission
		 * on the physical engine). We use the engine class as a guide
		 * here, although that could be refined.
		 */
		if (ve->base.class != OTHER_CLASS) {
			if (ve->base.class != sibling->class) {
				DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
					  sibling->class, ve->base.class);
				err = -EINVAL;
				goto err_put;
			}
			continue;
		}

		ve->base.class = sibling->class;
		ve->base.uabi_class = sibling->uabi_class;
		snprintf(ve->base.name, sizeof(ve->base.name),
			 "v%dx%d", ve->base.class, count);
		ve->base.context_size = sibling->context_size;

		ve->base.emit_bb_start = sibling->emit_bb_start;
		ve->base.emit_flush = sibling->emit_flush;
		ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
		ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
		ve->base.emit_fini_breadcrumb_dw =
			sibling->emit_fini_breadcrumb_dw;

		ve->base.flags = sibling->flags;
	}

	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;

	virtual_engine_initial_hint(ve);
	return &ve->context;

err_put:
	intel_context_put(&ve->context);
	return ERR_PTR(err);
}

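/*
 * Cloning recreates the virtual engine from the same sibling array and
 * then duplicates its bond map, so the copy balances and bonds exactly
 * like the original. Bond attachment is cumulative: repeated calls for
 * the same master OR further siblings into the existing mask, e.g.
 *
 *	err = intel_virtual_engine_attach_bond(engine, master, vcs0);
 *	if (!err)
 *		err = intel_virtual_engine_attach_bond(engine, master, vcs1);
 *
 * leaves a single ve_bond entry for the master covering both engines
 * (the engine names above are only illustrative). A sibling that is not
 * part of the virtual engine is rejected with -EINVAL.
 */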
struct intel_context *
intel_execlists_clone_virtual(struct intel_engine_cs *src)
{
	struct virtual_engine *se = to_virtual_engine(src);
	struct intel_context *dst;

	dst = intel_execlists_create_virtual(se->siblings,
					     se->num_siblings);
	if (IS_ERR(dst))
		return dst;

	if (se->num_bonds) {
		struct virtual_engine *de = to_virtual_engine(dst->engine);

		de->bonds = kmemdup(se->bonds,
				    sizeof(*se->bonds) * se->num_bonds,
				    GFP_KERNEL);
		if (!de->bonds) {
			intel_context_put(dst);
			return ERR_PTR(-ENOMEM);
		}

		de->num_bonds = se->num_bonds;
	}

	return dst;
}

int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
				     const struct intel_engine_cs *master,
				     const struct intel_engine_cs *sibling)
{
	struct virtual_engine *ve = to_virtual_engine(engine);
	struct ve_bond *bond;
	int n;

	/* Sanity check the sibling is part of the virtual engine */
	for (n = 0; n < ve->num_siblings; n++)
		if (sibling == ve->siblings[n])
			break;
	if (n == ve->num_siblings)
		return -EINVAL;

	bond = virtual_find_bond(ve, master);
	if (bond) {
		bond->sibling_mask |= sibling->mask;
		return 0;
	}

	bond = krealloc(ve->bonds,
			sizeof(*bond) * (ve->num_bonds + 1),
			GFP_KERNEL);
	if (!bond)
		return -ENOMEM;

	bond[ve->num_bonds].master = master;
	bond[ve->num_bonds].sibling_mask = sibling->mask;

	ve->bonds = bond;
	ve->num_bonds++;

	return 0;
}

struct intel_engine_cs *
intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
				 unsigned int sibling)
{
	struct virtual_engine *ve = to_virtual_engine(engine);

	if (sibling >= ve->num_siblings)
		return NULL;

	return ve->siblings[sibling];
}

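/*
 * Debug pretty-printer for engine state dumps: walks the requests already
 * executing on the engine ("E "), those still waiting in the priority
 * tree ("Q ") and any requests held by virtual engines attached to this
 * sibling ("V "), capping each category at @max entries and noting how
 * many were skipped.
 */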
void intel_execlists_show_requests(struct intel_engine_cs *engine,
				   struct drm_printer *m,
				   void (*show_request)(struct drm_printer *m,
							struct i915_request *rq,
							const char *prefix),
				   unsigned int max)
{
	const struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request *rq, *last;
	unsigned long flags;
	unsigned int count;
	struct rb_node *rb;

	spin_lock_irqsave(&engine->active.lock, flags);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (count++ < max - 1)
			show_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tE ");
	}

	if (execlists->switch_priority_hint != INT_MIN)
		drm_printf(m, "\t\tSwitch priority hint: %d\n",
			   READ_ONCE(execlists->switch_priority_hint));
	if (execlists->queue_priority_hint != INT_MIN)
		drm_printf(m, "\t\tQueue priority hint: %d\n",
			   READ_ONCE(execlists->queue_priority_hint));

	last = NULL;
	count = 0;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		int i;

		priolist_for_each_request(rq, p, i) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tQ ");
	}

	last = NULL;
	count = 0;
	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		struct i915_request *rq = READ_ONCE(ve->request);

		if (rq) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tV ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d virtual requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tV ");
	}

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void intel_lr_context_reset(struct intel_engine_cs *engine,
			    struct intel_context *ce,
			    u32 head,
			    bool scrub)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));

	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	if (scrub)
		restore_default_state(ce, engine);

	/* Rerun the request; its payload has been neutered (if guilty). */
	__execlists_update_reg_state(ce, engine, head);
}

bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
{
	return engine->set_default_submission ==
	       intel_execlists_set_default_submission;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif