/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */

/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * Motivation:
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences with the legacy HW contexts is that logical
 * ring contexts incorporate many more things into the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But, what about the ringbuffer control registers (head, tail, etc..)?
 * Shouldn't we just need a set of those per engine command streamer? This is
 * where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * LRC implementation:
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated.
 * The local default context for each opened fd is more complex, because we
 * don't know at creation time which engine is going to use it. To handle
 * this, we have implemented a deferred creation of LR contexts:
 *
 * The local context starts its life as a hollow or blank holder, that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object and
 * so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different context,
 * which will cause the hardware to continue executing the second request and queue
 * the new request (the GPU detects the condition of a context getting preempted
 * with the same context and optimizes the context switch flow by not doing
 * preemption, but just sampling the new tail pointer).
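 *
 * The pairing rule above boils down to a short loop; a simplified sketch
 * (not the actual dequeue code below, which also handles preemption and
 * virtual engines) of how the two ELSP slots are filled::
 *
 *	elsp[0] = head of the request queue;
 *	while (next request shares elsp[0]'s context)
 *		fold it into elsp[0] (just advance the shared RING_TAIL);
 *	elsp[1] = next request, or NULL if the queue is now empty;
 *	write both context descriptors to the submit port;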
 */
#include <linux/interrupt.h>

#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
#include "shmem_utils.h"

#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define GEN8_CTX_STATUS_COMPLETED_MASK \
	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)

#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)

#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE	(0x1) /* lower csb dword */
#define GEN12_CTX_SWITCH_DETAIL(csb_dw)	((csb_dw) & 0xF) /* upper csb dword */
#define GEN12_CSB_SW_CTX_ID_MASK		GENMASK(25, 15)
#define GEN12_IDLE_CTX_ID		0x7FF
#define GEN12_CSB_CTX_VALID(csb_dw) \
	(FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */

struct virtual_engine {
	struct intel_engine_cs base;
	struct intel_context context;

	/*
	 * We allow only a single request through the virtual engine at a time
	 * (each request in the timeline waits for the completion fence of
	 * the previous before being submitted). By restricting ourselves to
	 * only submitting a single request, each request is placed onto a
	 * physical engine to maximise load spreading (by virtue of the late
	 * greedy scheduling -- each real engine takes the next available
	 * request upon idling).
	 */
	struct i915_request *request;

	/*
	 * We keep a rbtree of available virtual engines inside each physical
	 * engine, sorted by priority. Here we preallocate the nodes we need
	 * for the virtual engine, indexed by physical_engine->id.
	 */
	struct ve_node {
		struct rb_node rb;
		int prio;
	} nodes[I915_NUM_ENGINES];

	/*
	 * Keep track of bonded pairs -- restrictions upon our selection of
	 * physical engines any particular request may be submitted to.
	 * If we receive a submit-fence from a master engine, we will only
	 * use one of the sibling_mask physical engines.
	 */
	struct ve_bond {
		const struct intel_engine_cs *master;
		intel_engine_mask_t sibling_mask;
	} *bonds;
	unsigned int num_bonds;

	/* And finally, which physical engines this virtual engine maps onto. */
	unsigned int num_siblings;
	struct intel_engine_cs *siblings[];
};

static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!intel_engine_is_virtual(engine));
	return container_of(engine, struct virtual_engine, base);
}

static int __execlists_context_alloc(struct intel_context *ce,
				     struct intel_engine_cs *engine);

static void execlists_init_reg_state(u32 *reg_state,
				     const struct intel_context *ce,
				     const struct intel_engine_cs *engine,
				     const struct intel_ring *ring,
				     bool close);
static void
__execlists_update_reg_state(const struct intel_context *ce,
			     const struct intel_engine_cs *engine,
			     u32 head);

static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{
	if (INTEL_GEN(engine->i915) >= 12)
		return 0x60;
	else if (INTEL_GEN(engine->i915) >= 9)
		return 0x54;
	else if (engine->class == RENDER_CLASS)
		return 0x58;
	else
		return -1;
}

static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{
	if (INTEL_GEN(engine->i915) >= 12)
		return 0x74;
	else if (INTEL_GEN(engine->i915) >= 9)
		return 0x68;
	else if (engine->class == RENDER_CLASS)
		return 0xd8;
	else
		return -1;
}

static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
{
	if (INTEL_GEN(engine->i915) >= 12)
		return 0x12;
	else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
		return 0x18;
	else
		return -1;
}

static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
{
	int x;

	x = lrc_ring_wa_bb_per_ctx(engine);
	if (x < 0)
		return x;

	return x + 2;
}

static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
{
	int x;

	x = lrc_ring_indirect_ptr(engine);
	if (x < 0)
		return x;

	return x + 2;
}

static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return -1;

	if (INTEL_GEN(engine->i915) >= 12)
		return 0xb6;
	else if (INTEL_GEN(engine->i915) >= 11)
		return 0xaa;
	else
		return -1;
}

static u32
lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	default:
		MISSING_CASE(INTEL_GEN(engine->i915));
		fallthrough;
	case 12:
		return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	case 11:
		return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	case 10:
		return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	case 9:
		return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	case 8:
		return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
	}
}

static void
lrc_ring_setup_indirect_ctx(u32 *regs,
			    const struct intel_engine_cs *engine,
			    u32 ctx_bb_ggtt_addr,
			    u32 size)
{
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
	GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
	regs[lrc_ring_indirect_ptr(engine) + 1] =
		ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);

	GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
	regs[lrc_ring_indirect_offset(engine) + 1] =
		lrc_ring_indirect_offset_default(engine) << 6;
}
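/*
 * A worked example of the encoding above, using the Gen12 offsets returned
 * by the helpers (wa_bb_per_ctx at dword 0x12, so the indirect-ctx pointer
 * and offset registers sit at dwords 0x14 and 0x16, with the programmed
 * value in the dword following each register). For a 128-byte context
 * batch at GGTT address 0x1000:
 *
 *	regs[0x15] = 0x1000 | (128 / CACHELINE_BYTES);	// address | size in cachelines
 *	regs[0x17] = <per-gen default offset> << 6;	// offset field starts at bit 6
 */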
static u32 intel_context_get_runtime(const struct intel_context *ce)
{
	/*
	 * We can use either ppHWSP[16] which is recorded before the context
	 * switch (and so excludes the cost of context switches) or use the
	 * value from the context image itself, which is saved/restored earlier
	 * and so includes the cost of the save.
	 */
	return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
}

static void mark_eio(struct i915_request *rq)
{
	if (i915_request_completed(rq))
		return;

	GEM_BUG_ON(i915_request_signaled(rq));

	i915_request_set_error_once(rq, -EIO);
	i915_request_mark_complete(rq);
}

static struct i915_request *
active_request(const struct intel_timeline * const tl, struct i915_request *rq)
{
	struct i915_request *active = rq;

	rcu_read_lock();
	list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
		if (i915_request_completed(rq))
			break;

		active = rq;
	}
	rcu_read_unlock();

	return active;
}

static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}

static inline void
ring_set_paused(const struct intel_engine_cs *engine, int state)
{
	/*
	 * We inspect HWS_PREEMPT with a semaphore inside
	 * engine->emit_fini_breadcrumb. If the dword is true,
	 * the ring is paused as the semaphore will busywait
	 * until the dword is false.
	 */
	engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
	if (state)
		wmb();
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}

static inline int rq_prio(const struct i915_request *rq)
{
	return READ_ONCE(rq->sched.attr.priority);
}

static int effective_prio(const struct i915_request *rq)
{
	int prio = rq_prio(rq);

	/*
	 * If this request is special and must not be interrupted at any
	 * cost, so be it. Note we are only checking the most recent request
	 * in the context and so may be masking an earlier vip request. It
	 * is hoped that under the conditions where nopreempt is used, this
	 * will not matter (i.e. all requests to that context will be
	 * nopreempt for as long as desired).
	 */
	if (i915_request_has_nopreempt(rq))
		prio = I915_PRIORITY_UNPREEMPTABLE;

	return prio;
}

static int queue_prio(const struct intel_engine_execlists *execlists)
{
	struct i915_priolist *p;
	struct rb_node *rb;

	rb = rb_first_cached(&execlists->queue);
	if (!rb)
		return INT_MIN;

	/*
	 * As the priolist[] are inverted, with the highest priority in [0],
	 * we have to flip the index value to become priority.
	 */
	p = to_priolist(rb);
	if (!I915_USER_PRIORITY_SHIFT)
		return p->priority;

	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}
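/*
 * A worked example of the flip above, assuming I915_USER_PRIORITY_SHIFT == 2
 * (a hypothetical value, purely for illustration): for a priolist at user
 * level p->priority == 0 with only bit 0 of p->used set (the highest,
 * inverted, sub-level), the effective priority is
 * ((0 + 1) << 2) - ffs(0x1) = 3; with only bit 2 set it would be
 * 4 - 3 = 1, i.e. lower, as expected.
 */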
static inline bool need_preempt(const struct intel_engine_cs *engine,
				const struct i915_request *rq,
				struct rb_node *rb)
{
	int last_prio;

	if (!intel_engine_has_semaphores(engine))
		return false;

	/*
	 * Check if the current priority hint merits a preemption attempt.
	 *
	 * We record the highest value priority we saw during rescheduling
	 * prior to this dequeue, therefore we know that if it is strictly
	 * less than the current tail of ELSP[0], we do not need to force
	 * a preempt-to-idle cycle.
	 *
	 * However, the priority hint is a mere hint that we may need to
	 * preempt. If that hint is stale or we may be trying to preempt
	 * ourselves, ignore the request.
	 *
	 * More naturally we would write
	 *	prio >= max(0, last);
	 * except that we wish to prevent triggering preemption at the same
	 * priority level: the task that is running should remain running
	 * to preserve FIFO ordering of dependencies.
	 */
	last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
	if (engine->execlists.queue_priority_hint <= last_prio)
		return false;

	/*
	 * Check against the first request in ELSP[1], it will, thanks to the
	 * power of PI, be the highest priority of that context.
	 */
	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
		return true;

	if (rb) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		bool preempt = false;

		if (engine == ve->siblings[0]) { /* only preempt one sibling */
			struct i915_request *next;

			rcu_read_lock();
			next = READ_ONCE(ve->request);
			if (next)
				preempt = rq_prio(next) > last_prio;
			rcu_read_unlock();
		}

		if (preempt)
			return preempt;
	}

	/*
	 * If the inflight context did not trigger the preemption, then maybe
	 * it was the set of queued requests? Pick the highest priority in
	 * the queue (the first active priolist) and see if it deserves to be
	 * running instead of ELSP[0].
	 *
	 * The highest priority request in the queue cannot be either
	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
	 * context, its priority would not exceed ELSP[0] aka last_prio.
	 */
	return queue_prio(&engine->execlists) > last_prio;
}

__maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
		      const struct i915_request *next)
{
	/*
	 * Without preemption, the prev may refer to the still active element
	 * which we refuse to let go.
	 *
	 * Even with preemption, there are times when we think it is better not
	 * to preempt and leave an ostensibly lower priority request in flight.
	 */
	if (i915_request_is_active(prev))
		return true;

	return rq_prio(prev) >= rq_prio(next);
}
/*
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
 *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
 *      bits 53-54:    mbz, reserved for use by hardware
 *      bits 55-63:    group ID, currently unused and set to 0
 *
 * Starting from Gen11, the upper dword of the descriptor has a new format:
 *
 *      bits 32-36:    reserved
 *      bits 37-47:    SW context ID
 *      bits 48-53:    engine instance
 *      bit 54:        mbz, reserved for use by hardware
 *      bits 55-60:    SW counter
 *      bits 61-63:    engine class
 *
 * engine info, SW context ID and SW counter need to form a unique number
 * (Context ID) per lrc.
 */
static u32
lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
	u32 desc;

	desc = INTEL_LEGACY_32B_CONTEXT;
	if (i915_vm_is_4lvl(ce->vm))
		desc = INTEL_LEGACY_64B_CONTEXT;
	desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;

	desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
	if (IS_GEN(engine->i915, 8))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	return i915_ggtt_offset(ce->state) | desc;
}

static inline unsigned int dword_in_page(void *addr)
{
	return offset_in_page(addr) / sizeof(u32);
}

static void set_offsets(u32 *regs,
			const u8 *data,
			const struct intel_engine_cs *engine,
			bool clear)
#define NOP(x) (BIT(7) | (x))
#define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
#define POSTED BIT(0)
#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
#define REG16(x) \
	(((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
	(((x) >> 2) & 0x7f)
#define END(total_state_size) 0, (total_state_size)
{
	const u32 base = engine->mmio_base;

	while (*data) {
		u8 count, flags;

		if (*data & BIT(7)) { /* skip */
			count = *data++ & ~BIT(7);
			if (clear)
				memset32(regs, MI_NOOP, count);
			regs += count;
			continue;
		}

		count = *data & 0x3f;
		flags = *data >> 6;
		data++;

		*regs = MI_LOAD_REGISTER_IMM(count);
		if (flags & POSTED)
			*regs |= MI_LRI_FORCE_POSTED;
		if (INTEL_GEN(engine->i915) >= 11)
			*regs |= MI_LRI_LRM_CS_MMIO;
		regs++;

		GEM_BUG_ON(!count);
		do {
			u32 offset = 0;
			u8 v;

			do {
				v = *data++;
				offset <<= 7;
				offset |= v & ~BIT(7);
			} while (v & BIT(7));

			regs[0] = base + (offset << 2);
			if (clear)
				regs[1] = 0;
			regs += 2;
		} while (--count);
	}

	if (clear) {
		u8 count = *++data;

		/* Clear past the tail for HW access */
		GEM_BUG_ON(dword_in_page(regs) > count);
		memset32(regs, MI_NOOP, count - dword_in_page(regs));

		/* Close the batch; used mainly by live_lrc_layout() */
		*regs = MI_BATCH_BUFFER_END;
		if (INTEL_GEN(engine->i915) >= 10)
			*regs |= BIT(0);
	}
}
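/*
 * A decoding example for the tables below, using the opening of
 * gen8_xcs_offsets (no new data, just the encoding spelt out): NOP(1)
 * skips one dword (a MI_NOOP in a cleared image); LRI(11, 0) emits a
 * MI_LOAD_REGISTER_IMM(11) header; REG16(0x244) carries the register
 * offset in two 7-bit chunks (the high byte has BIT(7) set as a
 * continuation marker), while REG(0x034) fits in a single byte. Each
 * register expands to an offset/value pair relative to
 * engine->mmio_base. The END(total) sentinel terminates the walk and
 * records the total dword count used to pad and close the state image.
 */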
static const u8 gen8_xcs_offsets[] = {
	NOP(1),
	LRI(11, 0),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),

	NOP(9),
	LRI(9, 0),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(2, 0),
	REG16(0x200),
	REG(0x028),

	END(80)
};

static const u8 gen9_xcs_offsets[] = {
	NOP(1),
	LRI(14, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),

	NOP(3),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(1, POSTED),
	REG16(0x200),

	NOP(13),
	LRI(44, POSTED),
	REG(0x028),
	REG(0x09c),
	REG(0x0c0),
	REG(0x178),
	REG(0x17c),
	REG16(0x358),
	REG(0x170),
	REG(0x150),
	REG(0x154),
	REG(0x158),
	REG16(0x41c),
	REG16(0x600),
	REG16(0x604),
	REG16(0x608),
	REG16(0x60c),
	REG16(0x610),
	REG16(0x614),
	REG16(0x618),
	REG16(0x61c),
	REG16(0x620),
	REG16(0x624),
	REG16(0x628),
	REG16(0x62c),
	REG16(0x630),
	REG16(0x634),
	REG16(0x638),
	REG16(0x63c),
	REG16(0x640),
	REG16(0x644),
	REG16(0x648),
	REG16(0x64c),
	REG16(0x650),
	REG16(0x654),
	REG16(0x658),
	REG16(0x65c),
	REG16(0x660),
	REG16(0x664),
	REG16(0x668),
	REG16(0x66c),
	REG16(0x670),
	REG16(0x674),
	REG16(0x678),
	REG16(0x67c),
	REG(0x068),

	END(176)
};

static const u8 gen12_xcs_offsets[] = {
	NOP(1),
	LRI(13, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),

	NOP(5),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	END(80)
};

static const u8 gen8_rcs_offsets[] = {
	NOP(1),
	LRI(14, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),

	NOP(3),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(1, 0),
	REG(0x0c8),

	END(80)
};

static const u8 gen9_rcs_offsets[] = {
	NOP(1),
	LRI(14, POSTED),
	REG16(0x244),
	REG(0x34),
	REG(0x30),
	REG(0x38),
	REG(0x3c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),

	NOP(3),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	NOP(13),
	LRI(1, 0),
	REG(0xc8),

	NOP(13),
	LRI(44, POSTED),
	REG(0x28),
	REG(0x9c),
	REG(0xc0),
	REG(0x178),
	REG(0x17c),
	REG16(0x358),
	REG(0x170),
	REG(0x150),
	REG(0x154),
	REG(0x158),
	REG16(0x41c),
	REG16(0x600),
	REG16(0x604),
	REG16(0x608),
	REG16(0x60c),
	REG16(0x610),
	REG16(0x614),
	REG16(0x618),
	REG16(0x61c),
	REG16(0x620),
	REG16(0x624),
	REG16(0x628),
	REG16(0x62c),
	REG16(0x630),
	REG16(0x634),
	REG16(0x638),
	REG16(0x63c),
	REG16(0x640),
	REG16(0x644),
	REG16(0x648),
	REG16(0x64c),
	REG16(0x650),
	REG16(0x654),
	REG16(0x658),
	REG16(0x65c),
	REG16(0x660),
	REG16(0x664),
	REG16(0x668),
	REG16(0x66c),
	REG16(0x670),
	REG16(0x674),
	REG16(0x678),
	REG16(0x67c),
	REG(0x68),

	END(176)
};

static const u8 gen11_rcs_offsets[] = {
	NOP(1),
	LRI(15, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x11c),
	REG(0x114),
	REG(0x118),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),

	NOP(1),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	LRI(1, POSTED),
	REG(0x1b0),

	NOP(10),
	LRI(1, 0),
	REG(0x0c8),

	END(80)
};

static const u8 gen12_rcs_offsets[] = {
	NOP(1),
	LRI(13, POSTED),
	REG16(0x244),
	REG(0x034),
	REG(0x030),
	REG(0x038),
	REG(0x03c),
	REG(0x168),
	REG(0x140),
	REG(0x110),
	REG(0x1c0),
	REG(0x1c4),
	REG(0x1c8),
	REG(0x180),
	REG16(0x2b4),

	NOP(5),
	LRI(9, POSTED),
	REG16(0x3a8),
	REG16(0x28c),
	REG16(0x288),
	REG16(0x284),
	REG16(0x280),
	REG16(0x27c),
	REG16(0x278),
	REG16(0x274),
	REG16(0x270),

	LRI(3, POSTED),
	REG(0x1b0),
	REG16(0x5a8),
	REG16(0x5ac),

	NOP(6),
	LRI(1, 0),
	REG(0x0c8),
	NOP(3 + 9 + 1),

	LRI(51, POSTED),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG16(0x588),
	REG(0x028),
	REG(0x09c),
	REG(0x0c0),
	REG(0x178),
	REG(0x17c),
	REG16(0x358),
	REG(0x170),
	REG(0x150),
	REG(0x154),
	REG(0x158),
	REG16(0x41c),
	REG16(0x600),
	REG16(0x604),
	REG16(0x608),
	REG16(0x60c),
	REG16(0x610),
	REG16(0x614),
	REG16(0x618),
	REG16(0x61c),
	REG16(0x620),
	REG16(0x624),
	REG16(0x628),
	REG16(0x62c),
	REG16(0x630),
	REG16(0x634),
	REG16(0x638),
	REG16(0x63c),
	REG16(0x640),
	REG16(0x644),
	REG16(0x648),
	REG16(0x64c),
	REG16(0x650),
	REG16(0x654),
	REG16(0x658),
	REG16(0x65c),
	REG16(0x660),
	REG16(0x664),
	REG16(0x668),
	REG16(0x66c),
	REG16(0x670),
	REG16(0x674),
	REG16(0x678),
	REG16(0x67c),
	REG(0x068),
	REG(0x084),
	NOP(1),

	END(192)
};

#undef END
#undef REG16
#undef REG
#undef LRI
#undef NOP
static const u8 *reg_offsets(const struct intel_engine_cs *engine)
{
	/*
	 * The gen12+ lists only have the registers we program in the basic
	 * default state. We rely on the context image using relative
	 * addressing to automatically fix up the register state between the
	 * physical engines for the virtual engine.
	 */
	GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
		   !intel_engine_has_relative_mmio(engine));

	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 12)
			return gen12_rcs_offsets;
		else if (INTEL_GEN(engine->i915) >= 11)
			return gen11_rcs_offsets;
		else if (INTEL_GEN(engine->i915) >= 9)
			return gen9_rcs_offsets;
		else
			return gen8_rcs_offsets;
	} else {
		if (INTEL_GEN(engine->i915) >= 12)
			return gen12_xcs_offsets;
		else if (INTEL_GEN(engine->i915) >= 9)
			return gen9_xcs_offsets;
		else
			return gen8_xcs_offsets;
	}
}

static struct i915_request *
__unwind_incomplete_requests(struct intel_engine_cs *engine)
{
	struct i915_request *rq, *rn, *active = NULL;
	struct list_head *pl;
	int prio = I915_PRIORITY_INVALID;

	lockdep_assert_held(&engine->active.lock);

	list_for_each_entry_safe_reverse(rq, rn,
					 &engine->active.requests,
					 sched.link) {
		if (i915_request_completed(rq))
			continue; /* XXX */

		__i915_request_unsubmit(rq);

		/*
		 * Push the request back into the queue for later resubmission.
		 * If this request is not native to this physical engine (i.e.
		 * it came from a virtual source), push it back onto the virtual
		 * engine so that it can be moved across onto another physical
		 * engine as load dictates.
		 */
		if (likely(rq->execution_mask == engine->mask)) {
			GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
			if (rq_prio(rq) != prio) {
				prio = rq_prio(rq);
				pl = i915_sched_lookup_priolist(engine, prio);
			}
			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));

			list_move(&rq->sched.link, pl);
			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);

			/* Check in case we rollback so far we wrap [size/2] */
			if (intel_ring_direction(rq->ring,
						 intel_ring_wrap(rq->ring,
								 rq->tail),
						 rq->ring->tail) > 0)
				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;

			active = rq;
		} else {
			struct intel_engine_cs *owner = rq->context->engine;

			WRITE_ONCE(rq->engine, owner);
			owner->submit_request(rq);
			active = NULL;
		}
	}

	return active;
}

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);

	return __unwind_incomplete_requests(engine);
}

static inline void
execlists_context_status_change(struct i915_request *rq, unsigned long status)
{
	/*
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead-code.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return;

	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
				   status, rq);
}

static void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (atomic_add_unless(&engine->stats.active, 1, 0))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
		engine->stats.start = ktime_get();
		atomic_inc(&engine->stats.active);
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	GEM_BUG_ON(!atomic_read(&engine->stats.active));

	if (atomic_add_unless(&engine->stats.active, -1, 1))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	if (atomic_dec_and_test(&engine->stats.active)) {
		engine->stats.total =
			ktime_add(engine->stats.total,
				  ktime_sub(ktime_get(), engine->stats.start));
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
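/*
 * The seqlock above pairs with a lockless read side elsewhere in the
 * driver; a minimal sketch of such a reader, assuming it only wants the
 * accumulated busy time (names as in engine->stats):
 *
 *	unsigned int seq;
 *	ktime_t total;
 *
 *	do {
 *		seq = read_seqbegin(&engine->stats.lock);
 *		total = engine->stats.total;
 *		if (atomic_read(&engine->stats.active))
 *			total = ktime_add(total, ktime_sub(ktime_get(),
 *							   engine->stats.start));
 *	} while (read_seqretry(&engine->stats.lock, seq));
 */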
static void
execlists_check_context(const struct intel_context *ce,
			const struct intel_engine_cs *engine)
{
	const struct intel_ring *ring = ce->ring;
	u32 *regs = ce->lrc_reg_state;
	bool valid = true;
	int x;

	if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
		pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
		       engine->name,
		       regs[CTX_RING_START],
		       i915_ggtt_offset(ring->vma));
		regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
		valid = false;
	}

	if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
	    (RING_CTL_SIZE(ring->size) | RING_VALID)) {
		pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
		       engine->name,
		       regs[CTX_RING_CTL],
		       (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
		regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
		valid = false;
	}

	x = lrc_ring_mi_mode(engine);
	if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
		pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
		       engine->name, regs[x + 1]);
		regs[x + 1] &= ~STOP_RING;
		regs[x + 1] |= STOP_RING << 16;
		valid = false;
	}

	WARN_ONCE(!valid, "Invalid lrc state found before submission\n");
}

static void restore_default_state(struct intel_context *ce,
				  struct intel_engine_cs *engine)
{
	u32 *regs;

	regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
	execlists_init_reg_state(regs, ce, engine, ce->ring, true);

	ce->runtime.last = intel_context_get_runtime(ce);
}

static void reset_active(struct i915_request *rq,
			 struct intel_engine_cs *engine)
{
	struct intel_context * const ce = rq->context;
	u32 head;

	/*
	 * The executing context has been cancelled. We want to prevent
	 * further execution along this context and propagate the error on
	 * to anything depending on its results.
	 *
	 * In __i915_request_submit(), we apply the -EIO and remove the
	 * requests' payloads for any banned requests. But first, we must
	 * rewind the context back to the start of the incomplete request so
	 * that we do not jump back into the middle of the batch.
	 *
	 * We preserve the breadcrumbs and semaphores of the incomplete
	 * requests so that inter-timeline dependencies (i.e. other timelines)
	 * remain correctly ordered. And we defer to __i915_request_submit()
	 * so that all asynchronous waits are correctly handled.
	 */
	ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
		     rq->fence.context, rq->fence.seqno);

	/* On resubmission of the active request, payload will be scrubbed */
	if (i915_request_completed(rq))
		head = rq->tail;
	else
		head = active_request(ce->timeline, rq)->head;
	head = intel_ring_wrap(ce->ring, head);

	/* Scrub the context image to prevent replaying the previous batch */
	restore_default_state(ce, engine);
	__execlists_update_reg_state(ce, engine, head);

	/* We've switched away, so this should be a no-op, but intent matters */
	ce->lrc.desc |= CTX_DESC_FORCE_RESTORE;
}

static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
{
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
	ce->runtime.num_underflow += dt < 0;
	ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
#endif
}

static void intel_context_update_runtime(struct intel_context *ce)
{
	u32 old;
	s32 dt;

	if (intel_context_is_barrier(ce))
		return;

	old = ce->runtime.last;
	ce->runtime.last = intel_context_get_runtime(ce);
	dt = ce->runtime.last - old;

	if (unlikely(dt <= 0)) {
		CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
			 old, ce->runtime.last, dt);
		st_update_runtime_underflow(ce, dt);
		return;
	}

	ewma_runtime_add(&ce->runtime.avg, dt);
	ce->runtime.total += dt;
}

static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
	struct intel_engine_cs * const engine = rq->engine;
	struct intel_context * const ce = rq->context;

	intel_context_get(ce);

	if (unlikely(intel_context_is_banned(ce)))
		reset_active(rq, engine);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		execlists_check_context(ce, engine);

	if (ce->tag) {
		/* Use a fixed tag for OA and friends */
		GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
		ce->lrc.ccid = ce->tag;
	} else {
		/* We don't need a strict matching tag, just different values */
		unsigned int tag = ffs(READ_ONCE(engine->context_tag));

		GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
		clear_bit(tag - 1, &engine->context_tag);
		ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32);

		BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
	}

	ce->lrc.ccid |= engine->execlists.ccid;

	__intel_gt_pm_get(engine->gt);
	if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active))
		intel_uncore_forcewake_get(engine->uncore, engine->fw_domain);
	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
	intel_engine_context_in(engine);

	return engine;
}

static inline struct i915_request *
execlists_schedule_in(struct i915_request *rq, int idx)
{
	struct intel_context * const ce = rq->context;
	struct intel_engine_cs *old;

	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
	trace_i915_request_in(rq, idx);

	old = READ_ONCE(ce->inflight);
	do {
		if (!old) {
			WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq));
			break;
		}
	} while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old)));

	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
	return i915_request_get(rq);
}

static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
{
	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
	struct i915_request *next = READ_ONCE(ve->request);

	if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
		tasklet_hi_schedule(&ve->base.execlists.tasklet);
}

static inline void
__execlists_schedule_out(struct i915_request *rq,
			 struct intel_engine_cs * const engine,
			 unsigned int ccid)
{
	struct intel_context * const ce = rq->context;

	/*
	 * NB process_csb() is not under the engine->active.lock and hence
	 * schedule_out can race with schedule_in meaning that we should
	 * refrain from doing non-trivial work here.
	 */

	/*
	 * If we have just completed this context, the engine may now be
	 * idle and we want to re-enter powersaving.
	 */
	if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
	    i915_request_completed(rq))
		intel_engine_add_retire(engine, ce->timeline);

	ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
	ccid &= GEN12_MAX_CONTEXT_HW_ID;
	if (ccid < BITS_PER_LONG) {
		GEM_BUG_ON(ccid == 0);
		GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
		set_bit(ccid - 1, &engine->context_tag);
	}

	intel_context_update_runtime(ce);
	intel_engine_context_out(engine);
	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
	if (engine->fw_domain && !atomic_dec_return(&engine->fw_active))
		intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
	intel_gt_pm_put_async(engine->gt);

	/*
	 * If this is part of a virtual engine, its next request may
	 * have been blocked waiting for access to the active context.
	 * We have to kick all the siblings again in case we need to
	 * switch (e.g. the next request is not runnable on this
	 * engine). Hopefully, we will already have submitted the next
	 * request before the tasklet runs and do not need to rebuild
	 * each virtual tree and kick everyone again.
	 */
	if (ce->engine != engine)
		kick_siblings(rq, ce);

	intel_context_put(ce);
}

static inline void
execlists_schedule_out(struct i915_request *rq)
{
	struct intel_context * const ce = rq->context;
	struct intel_engine_cs *cur, *old;
	u32 ccid;

	trace_i915_request_out(rq);

	ccid = rq->context->lrc.ccid;
	old = READ_ONCE(ce->inflight);
	do
		cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL;
	while (!try_cmpxchg(&ce->inflight, &old, cur));
	if (!cur)
		__execlists_schedule_out(rq, old, ccid);

	i915_request_put(rq);
}
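/*
 * A sketch of the ce->inflight bookkeeping used by the two functions
 * above: the low bits of the engine pointer carry a small submission
 * count (ptr_inc/ptr_dec/ptr_unmask_bits), so for back-to-back requests
 * of one context the pointer evolves as
 *
 *	NULL -> engine -> engine+1 -> engine -> NULL
 *
 * and only the NULL->engine transition runs __execlists_schedule_in(),
 * while only the engine->NULL transition runs __execlists_schedule_out().
 */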
static u64 execlists_update_context(struct i915_request *rq)
{
	struct intel_context *ce = rq->context;
	u64 desc = ce->lrc.desc;
	u32 tail, prev;

	/*
	 * WaIdleLiteRestore:bdw,skl
	 *
	 * We should never submit the context with the same RING_TAIL twice
	 * just in case we submit an empty ring, which confuses the HW.
	 *
	 * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of
	 * the normal request to be able to always advance the RING_TAIL on
	 * subsequent resubmissions (for lite restore). Should that fail us,
	 * and we try to submit the same tail again, force the context
	 * reload.
	 *
	 * If we need to return to a preempted context, we need to skip the
	 * lite-restore and force it to reload the RING_TAIL. Otherwise, the
	 * HW has a tendency to ignore us rewinding the TAIL to the end of
	 * an earlier request.
	 */
	GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
	prev = rq->ring->tail;
	tail = intel_ring_set_tail(rq->ring, rq->tail);
	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
		desc |= CTX_DESC_FORCE_RESTORE;
	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
	rq->tail = rq->wa_tail;

	/*
	 * Make sure the context image is complete before we submit it to HW.
	 *
	 * Ostensibly, writes (including the WCB) should be flushed prior to
	 * an uncached write such as our mmio register access, but the
	 * empirical evidence (esp. on Braswell) suggests that the WC write
	 * into memory may not be visible to the HW prior to the completion
	 * of the UC register write and that we may begin execution from the
	 * context before its image is complete leading to invalid PD chasing.
	 */
	wmb();

	ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
	return desc;
}

static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
{
	if (execlists->ctrl_reg) {
		writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
		writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
	} else {
		writel(upper_32_bits(desc), execlists->submit_reg);
		writel(lower_32_bits(desc), execlists->submit_reg);
	}
}

static __maybe_unused char *
dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
{
	if (!rq)
		return "";

	snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d",
		 prefix,
		 rq->context->lrc.ccid,
		 rq->fence.context, rq->fence.seqno,
		 i915_request_completed(rq) ? "!" :
		 i915_request_started(rq) ? "*" :
		 "",
		 rq_prio(rq));

	return buf;
}

static __maybe_unused void
trace_ports(const struct intel_engine_execlists *execlists,
	    const char *msg,
	    struct i915_request * const *ports)
{
	const struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);
	char __maybe_unused p0[40], p1[40];

	if (!ports[0])
		return;

	ENGINE_TRACE(engine, "%s { %s%s }\n", msg,
		     dump_port(p0, sizeof(p0), "", ports[0]),
		     dump_port(p1, sizeof(p1), ", ", ports[1]));
}

static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
	return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}

static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
		     const char *msg)
{
	struct intel_engine_cs *engine =
		container_of(execlists, typeof(*engine), execlists);
	struct i915_request * const *port, *rq;
	struct intel_context *ce = NULL;
	bool sentinel = false;
	u32 ccid = -1;

	trace_ports(execlists, msg, execlists->pending);

	/* We may be messing around with the lists during reset, lalala */
	if (reset_in_progress(execlists))
		return true;

	if (!execlists->pending[0]) {
		GEM_TRACE_ERR("%s: Nothing pending for promotion!\n",
			      engine->name);
		return false;
	}

	if (execlists->pending[execlists_num_ports(execlists)]) {
		GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n",
			      engine->name, execlists_num_ports(execlists));
		return false;
	}

	for (port = execlists->pending; (rq = *port); port++) {
		unsigned long flags;
		bool ok = true;

		GEM_BUG_ON(!kref_read(&rq->fence.refcount));
		GEM_BUG_ON(!i915_request_is_active(rq));

		if (ce == rq->context) {
			GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n",
				      engine->name,
				      ce->timeline->fence_context,
				      port - execlists->pending);
			return false;
		}
		ce = rq->context;

		if (ccid == ce->lrc.ccid) {
			GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n",
				      engine->name,
				      ccid, ce->timeline->fence_context,
				      port - execlists->pending);
			return false;
		}
		ccid = ce->lrc.ccid;

		/*
		 * Sentinels are supposed to be the last request so they flush
		 * the current execution off the HW. Check that they are the only
		 * request in the pending submission.
		 */
		if (sentinel) {
			GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n",
				      engine->name,
				      ce->timeline->fence_context,
				      port - execlists->pending);
			return false;
		}
		sentinel = i915_request_has_sentinel(rq);

		/* Hold tightly onto the lock to prevent concurrent retires! */
		if (!spin_trylock_irqsave(&rq->lock, flags))
			continue;

		if (i915_request_completed(rq))
			goto unlock;

		if (i915_active_is_idle(&ce->active) &&
		    !intel_context_is_barrier(ce)) {
			GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n",
				      engine->name,
				      ce->timeline->fence_context,
				      port - execlists->pending);
			ok = false;
			goto unlock;
		}

		if (!i915_vma_is_pinned(ce->state)) {
			GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n",
				      engine->name,
				      ce->timeline->fence_context,
				      port - execlists->pending);
			ok = false;
			goto unlock;
		}

		if (!i915_vma_is_pinned(ce->ring->vma)) {
			GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n",
				      engine->name,
				      ce->timeline->fence_context,
				      port - execlists->pending);
			ok = false;
			goto unlock;
		}

unlock:
		spin_unlock_irqrestore(&rq->lock, flags);
		if (!ok)
			return false;
	}

	return ce;
}

static void execlists_submit_ports(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned int n;

	GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));

	/*
	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
	 * not be relinquished until the device is idle (see
	 * i915_gem_idle_work_handler()). As a precaution, we make sure
	 * that all ELSP are drained i.e. we have processed the CSB,
	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
	 */
	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));

	/*
	 * ELSQ note: the submit queue is not cleared after being submitted
	 * to the HW so we need to make sure we always clean it up. This is
	 * currently ensured by the fact that we always write the same number
	 * of elsq entries, keep this in mind before changing the loop below.
	 */
	for (n = execlists_num_ports(execlists); n--; ) {
		struct i915_request *rq = execlists->pending[n];

		write_desc(execlists,
			   rq ? execlists_update_context(rq) : 0,
			   n);
	}

	/* we need to manually load the submit queue */
	if (execlists->ctrl_reg)
		writel(EL_CTRL_LOAD, execlists->ctrl_reg);
}

static bool ctx_single_port_submission(const struct intel_context *ce)
{
	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
		intel_context_force_single_submission(ce));
}

static bool can_merge_ctx(const struct intel_context *prev,
			  const struct intel_context *next)
{
	if (prev != next)
		return false;

	if (ctx_single_port_submission(prev))
		return false;

	return true;
}
static unsigned long i915_request_flags(const struct i915_request *rq)
{
	return READ_ONCE(rq->fence.flags);
}

static bool can_merge_rq(const struct i915_request *prev,
			 const struct i915_request *next)
{
	GEM_BUG_ON(prev == next);
	GEM_BUG_ON(!assert_priority_queue(prev, next));

	/*
	 * We do not submit known completed requests. Therefore if the next
	 * request is already completed, we can pretend to merge it in
	 * with the previous context (and we will skip updating the ELSP
	 * and tracking). Thus hopefully keeping the ELSP full with active
	 * contexts, despite the best efforts of preempt-to-busy to confuse
	 * us.
	 */
	if (i915_request_completed(next))
		return true;

	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
		     (BIT(I915_FENCE_FLAG_NOPREEMPT) |
		      BIT(I915_FENCE_FLAG_SENTINEL))))
		return false;

	if (!can_merge_ctx(prev->context, next->context))
		return false;

	GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
	return true;
}

static void virtual_update_register_offsets(u32 *regs,
					    struct intel_engine_cs *engine)
{
	set_offsets(regs, reg_offsets(engine), engine, false);
}

static bool virtual_matches(const struct virtual_engine *ve,
			    const struct i915_request *rq,
			    const struct intel_engine_cs *engine)
{
	const struct intel_engine_cs *inflight;

	if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
		return false;

	/*
	 * We track when the HW has completed saving the context image
	 * (i.e. when we have seen the final CS event switching out of
	 * the context) and must not overwrite the context image before
	 * then. This restricts us to only using the active engine
	 * while the previous virtualized request is inflight (so
	 * we reuse the register offsets). This is a very small
	 * hysteresis on the greedy selection algorithm.
	 */
	inflight = intel_context_inflight(&ve->context);
	if (inflight && inflight != engine)
		return false;

	return true;
}

static void virtual_xfer_context(struct virtual_engine *ve,
				 struct intel_engine_cs *engine)
{
	unsigned int n;

	if (likely(engine == ve->siblings[0]))
		return;

	GEM_BUG_ON(READ_ONCE(ve->context.inflight));
	if (!intel_engine_has_relative_mmio(engine))
		virtual_update_register_offsets(ve->context.lrc_reg_state,
						engine);

	/*
	 * Move the bound engine to the top of the list for
	 * future execution. We then kick this tasklet first
	 * before checking others, so that we preferentially
	 * reuse this set of bound registers.
	 */
	for (n = 1; n < ve->num_siblings; n++) {
		if (ve->siblings[n] == engine) {
			swap(ve->siblings[n], ve->siblings[0]);
			break;
		}
	}
}

#define for_each_waiter(p__, rq__) \
	list_for_each_entry_lockless(p__, \
				     &(rq__)->sched.waiters_list, \
				     wait_link)

#define for_each_signaler(p__, rq__) \
	list_for_each_entry_rcu(p__, \
				&(rq__)->sched.signalers_list, \
				signal_link)

static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
	LIST_HEAD(list);

	/*
	 * We want to move the interrupted request to the back of
	 * the round-robin list (i.e. its priority level), but
	 * in doing so, we must then move all requests that were in
	 * flight and were waiting for the interrupted request to
	 * be run after it again.
	 */
	do {
		struct i915_dependency *p;

		GEM_BUG_ON(i915_request_is_active(rq));
		list_move_tail(&rq->sched.link, pl);

		for_each_waiter(p, rq) {
			struct i915_request *w =
				container_of(p->waiter, typeof(*w), sched);

			if (p->flags & I915_DEPENDENCY_WEAK)
				continue;

			/* Leave semaphores spinning on the other engines */
			if (w->engine != rq->engine)
				continue;

			/* No waiter should start before its signaler */
			GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
				   i915_request_started(w) &&
				   !i915_request_completed(rq));

			GEM_BUG_ON(i915_request_is_active(w));
			if (!i915_request_is_ready(w))
				continue;

			if (rq_prio(w) < rq_prio(rq))
				continue;

			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
			list_move_tail(&w->sched.link, &list);
		}

		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
	} while (rq);
}

static void defer_active(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = __unwind_incomplete_requests(engine);
	if (!rq)
		return;

	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
}

static bool
need_timeslice(const struct intel_engine_cs *engine,
	       const struct i915_request *rq,
	       const struct rb_node *rb)
{
	int hint;

	if (!intel_engine_has_timeslices(engine))
		return false;

	hint = engine->execlists.queue_priority_hint;

	if (rb) {
		const struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		const struct intel_engine_cs *inflight =
			intel_context_inflight(&ve->context);

		if (!inflight || inflight == engine) {
			struct i915_request *next;

			rcu_read_lock();
			next = READ_ONCE(ve->request);
			if (next)
				hint = max(hint, rq_prio(next));
			rcu_read_unlock();
		}
	}

	if (!list_is_last(&rq->sched.link, &engine->active.requests))
		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));

	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
	return hint >= effective_prio(rq);
}

static bool
timeslice_yield(const struct intel_engine_execlists *el,
		const struct i915_request *rq)
{
	/*
	 * Once bitten, forever smitten!
	 *
	 * If the active context ever busy-waited on a semaphore,
	 * it will be treated as a hog until the end of its timeslice (i.e.
	 * until it is scheduled out and replaced by a new submission,
	 * possibly even its own lite-restore). The HW only sends an interrupt
	 * on the first miss, and we do not know if that semaphore has been
	 * signaled, or even if it is now stuck on another semaphore. Play
	 * safe, yield if it might be stuck -- it will be given a fresh
	 * timeslice in the near future.
	 */
1956 */ 1957 return rq->context->lrc.ccid == READ_ONCE(el->yield); 1958 } 1959 1960 static bool 1961 timeslice_expired(const struct intel_engine_execlists *el, 1962 const struct i915_request *rq) 1963 { 1964 return timer_expired(&el->timer) || timeslice_yield(el, rq); 1965 } 1966 1967 static int 1968 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) 1969 { 1970 if (list_is_last(&rq->sched.link, &engine->active.requests)) 1971 return engine->execlists.queue_priority_hint; 1972 1973 return rq_prio(list_next_entry(rq, sched.link)); 1974 } 1975 1976 static inline unsigned long 1977 timeslice(const struct intel_engine_cs *engine) 1978 { 1979 return READ_ONCE(engine->props.timeslice_duration_ms); 1980 } 1981 1982 static unsigned long active_timeslice(const struct intel_engine_cs *engine) 1983 { 1984 const struct intel_engine_execlists *execlists = &engine->execlists; 1985 const struct i915_request *rq = *execlists->active; 1986 1987 if (!rq || i915_request_completed(rq)) 1988 return 0; 1989 1990 if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq)) 1991 return 0; 1992 1993 return timeslice(engine); 1994 } 1995 1996 static void set_timeslice(struct intel_engine_cs *engine) 1997 { 1998 unsigned long duration; 1999 2000 if (!intel_engine_has_timeslices(engine)) 2001 return; 2002 2003 duration = active_timeslice(engine); 2004 ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration); 2005 2006 set_timer_ms(&engine->execlists.timer, duration); 2007 } 2008 2009 static void start_timeslice(struct intel_engine_cs *engine, int prio) 2010 { 2011 struct intel_engine_execlists *execlists = &engine->execlists; 2012 unsigned long duration; 2013 2014 if (!intel_engine_has_timeslices(engine)) 2015 return; 2016 2017 WRITE_ONCE(execlists->switch_priority_hint, prio); 2018 if (prio == INT_MIN) 2019 return; 2020 2021 if (timer_pending(&execlists->timer)) 2022 return; 2023 2024 duration = timeslice(engine); 2025 ENGINE_TRACE(engine, 2026 "start timeslicing, prio:%d, interval:%lu", 2027 prio, duration); 2028 2029 set_timer_ms(&execlists->timer, duration); 2030 } 2031 2032 static void record_preemption(struct intel_engine_execlists *execlists) 2033 { 2034 (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); 2035 } 2036 2037 static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, 2038 const struct i915_request *rq) 2039 { 2040 if (!rq) 2041 return 0; 2042 2043 /* Force a fast reset for terminated contexts (ignoring sysfs!) */ 2044 if (unlikely(intel_context_is_banned(rq->context))) 2045 return 1; 2046 2047 return READ_ONCE(engine->props.preempt_timeout_ms); 2048 } 2049 2050 static void set_preempt_timeout(struct intel_engine_cs *engine, 2051 const struct i915_request *rq) 2052 { 2053 if (!intel_engine_has_preempt_reset(engine)) 2054 return; 2055 2056 set_timer_ms(&engine->execlists.preempt, 2057 active_preempt_timeout(engine, rq)); 2058 } 2059 2060 static inline void clear_ports(struct i915_request **ports, int count) 2061 { 2062 memset_p((void **)ports, NULL, count); 2063 } 2064 2065 static inline void 2066 copy_ports(struct i915_request **dst, struct i915_request **src, int count) 2067 { 2068 /* A memcpy_p() would be very useful here! 
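In its absence we copy pointer-by-pointer below: plain memcpy() makes no guarantee that pointer-sized words are copied atomically, so a concurrent reader of the ports could observe a torn value.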
*/ 2069 while (count--) 2070 WRITE_ONCE(*dst++, *src++); /* avoid write tearing */ 2071 } 2072 2073 static void execlists_dequeue(struct intel_engine_cs *engine) 2074 { 2075 struct intel_engine_execlists * const execlists = &engine->execlists; 2076 struct i915_request **port = execlists->pending; 2077 struct i915_request ** const last_port = port + execlists->port_mask; 2078 struct i915_request * const *active; 2079 struct i915_request *last; 2080 struct rb_node *rb; 2081 bool submit = false; 2082 2083 /* 2084 * Hardware submission is through 2 ports. Conceptually each port 2085 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is 2086 * static for a context, and unique to each, so we only execute 2087 * requests belonging to a single context from each ring. RING_HEAD 2088 * is maintained by the CS in the context image, it marks the place 2089 * where it got up to last time, and through RING_TAIL we tell the CS 2090 * where we want to execute up to this time. 2091 * 2092 * In this list the requests are in order of execution. Consecutive 2093 * requests from the same context are adjacent in the ringbuffer. We 2094 * can combine these requests into a single RING_TAIL update: 2095 * 2096 * RING_HEAD...req1...req2 2097 * ^- RING_TAIL 2098 * since to execute req2 the CS must first execute req1. 2099 * 2100 * Our goal then is to point each port to the end of a consecutive 2101 * sequence of requests as being the most optimal (fewest wake ups 2102 * and context switches) submission. 2103 */ 2104 2105 for (rb = rb_first_cached(&execlists->virtual); rb; ) { 2106 struct virtual_engine *ve = 2107 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 2108 struct i915_request *rq = READ_ONCE(ve->request); 2109 2110 if (!rq) { /* lazily cleanup after another engine handled rq */ 2111 rb_erase_cached(rb, &execlists->virtual); 2112 RB_CLEAR_NODE(rb); 2113 rb = rb_first_cached(&execlists->virtual); 2114 continue; 2115 } 2116 2117 if (!virtual_matches(ve, rq, engine)) { 2118 rb = rb_next(rb); 2119 continue; 2120 } 2121 2122 break; 2123 } 2124 2125 /* 2126 * If the queue is higher priority than the last 2127 * request in the currently active context, submit afresh. 2128 * We will resubmit again afterwards in case we need to split 2129 * the active context to interject the preemption request, 2130 * i.e. we will retrigger preemption following the ack in case 2131 * of trouble. 2132 */ 2133 active = READ_ONCE(execlists->active); 2134 2135 /* 2136 * In theory we can skip over completed contexts that have not 2137 * yet been processed by events (as those events are in flight): 2138 * 2139 * while ((last = *active) && i915_request_completed(last)) 2140 * active++; 2141 * 2142 * However, the GPU cannot handle this as it will ultimately 2143 * find itself trying to jump back into a context it has just 2144 * completed and barf. 2145 */ 2146 2147 if ((last = *active)) { 2148 if (need_preempt(engine, last, rb)) { 2149 if (i915_request_completed(last)) { 2150 tasklet_hi_schedule(&execlists->tasklet); 2151 return; 2152 } 2153 2154 ENGINE_TRACE(engine, 2155 "preempting last=%llx:%lld, prio=%d, hint=%d\n", 2156 last->fence.context, 2157 last->fence.seqno, 2158 last->sched.attr.priority, 2159 execlists->queue_priority_hint); 2160 record_preemption(execlists); 2161 2162 /* 2163 * Don't let the RING_HEAD advance past the breadcrumb 2164 * as we unwind (and until we resubmit) so that we do 2165 * not accidentally tell it to go backwards. 
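 *
 * ring_set_paused() achieves this by setting the HWS_PREEMPT dword
 * upon which the semaphore in each fini-breadcrumb busywaits; in
 * essence (see ring_set_paused() earlier in this file):
 *
 *	engine->status_page.addr[I915_GEM_HWS_PREEMPT] = 1;
 *	wmb();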
2166 */ 2167 ring_set_paused(engine, 1); 2168 2169 /* 2170 * Note that we have not stopped the GPU at this point, 2171 * so we are unwinding the incomplete requests as they 2172 * remain inflight and so by the time we do complete 2173 * the preemption, some of the unwound requests may 2174 * complete! 2175 */ 2176 __unwind_incomplete_requests(engine); 2177 2178 last = NULL; 2179 } else if (need_timeslice(engine, last, rb) && 2180 timeslice_expired(execlists, last)) { 2181 if (i915_request_completed(last)) { 2182 tasklet_hi_schedule(&execlists->tasklet); 2183 return; 2184 } 2185 2186 ENGINE_TRACE(engine, 2187 "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", 2188 last->fence.context, 2189 last->fence.seqno, 2190 last->sched.attr.priority, 2191 execlists->queue_priority_hint, 2192 yesno(timeslice_yield(execlists, last))); 2193 2194 ring_set_paused(engine, 1); 2195 defer_active(engine); 2196 2197 /* 2198 * Unlike for preemption, if we rewind and continue 2199 * executing the same context as previously active, 2200 * the order of execution will remain the same and 2201 * the tail will only advance. We do not need to 2202 * force a full context restore, as a lite-restore 2203 * is sufficient to resample the monotonic TAIL. 2204 * 2205 * If we switch to any other context, similarly we 2206 * will not rewind TAIL of current context, and 2207 * normal save/restore will preserve state and allow 2208 * us to later continue executing the same request. 2209 */ 2210 last = NULL; 2211 } else { 2212 /* 2213 * Otherwise if we already have a request pending 2214 * for execution after the current one, we can 2215 * just wait until the next CS event before 2216 * queuing more. In either case we will force a 2217 * lite-restore preemption event, but if we wait 2218 * we hopefully coalesce several updates into a single 2219 * submission. 2220 */ 2221 if (!list_is_last(&last->sched.link, 2222 &engine->active.requests)) { 2223 /* 2224 * Even if ELSP[1] is occupied and not worthy 2225 * of timeslices, our queue might be. 2226 */ 2227 start_timeslice(engine, queue_prio(execlists)); 2228 return; 2229 } 2230 } 2231 } 2232 2233 while (rb) { /* XXX virtual is always taking precedence */ 2234 struct virtual_engine *ve = 2235 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 2236 struct i915_request *rq; 2237 2238 spin_lock(&ve->base.active.lock); 2239 2240 rq = ve->request; 2241 if (unlikely(!rq)) { /* lost the race to a sibling */ 2242 spin_unlock(&ve->base.active.lock); 2243 rb_erase_cached(rb, &execlists->virtual); 2244 RB_CLEAR_NODE(rb); 2245 rb = rb_first_cached(&execlists->virtual); 2246 continue; 2247 } 2248 2249 GEM_BUG_ON(rq != ve->request); 2250 GEM_BUG_ON(rq->engine != &ve->base); 2251 GEM_BUG_ON(rq->context != &ve->context); 2252 2253 if (rq_prio(rq) >= queue_prio(execlists)) { 2254 if (!virtual_matches(ve, rq, engine)) { 2255 spin_unlock(&ve->base.active.lock); 2256 rb = rb_next(rb); 2257 continue; 2258 } 2259 2260 if (last && !can_merge_rq(last, rq)) { 2261 spin_unlock(&ve->base.active.lock); 2262 start_timeslice(engine, rq_prio(rq)); 2263 return; /* leave this for another sibling */ 2264 } 2265 2266 ENGINE_TRACE(engine, 2267 "virtual rq=%llx:%lld%s, new engine? %s\n", 2268 rq->fence.context, 2269 rq->fence.seqno, 2270 i915_request_completed(rq) ? "!" : 2271 i915_request_started(rq) ? 
"*" : 2272 "", 2273 yesno(engine != ve->siblings[0])); 2274 2275 WRITE_ONCE(ve->request, NULL); 2276 WRITE_ONCE(ve->base.execlists.queue_priority_hint, 2277 INT_MIN); 2278 rb_erase_cached(rb, &execlists->virtual); 2279 RB_CLEAR_NODE(rb); 2280 2281 GEM_BUG_ON(!(rq->execution_mask & engine->mask)); 2282 WRITE_ONCE(rq->engine, engine); 2283 2284 if (__i915_request_submit(rq)) { 2285 /* 2286 * Only after we confirm that we will submit 2287 * this request (i.e. it has not already 2288 * completed), do we want to update the context. 2289 * 2290 * This serves two purposes. It avoids 2291 * unnecessary work if we are resubmitting an 2292 * already completed request after timeslicing. 2293 * But more importantly, it prevents us altering 2294 * ve->siblings[] on an idle context, where 2295 * we may be using ve->siblings[] in 2296 * virtual_context_enter / virtual_context_exit. 2297 */ 2298 virtual_xfer_context(ve, engine); 2299 GEM_BUG_ON(ve->siblings[0] != engine); 2300 2301 submit = true; 2302 last = rq; 2303 } 2304 i915_request_put(rq); 2305 2306 /* 2307 * Hmm, we have a bunch of virtual engine requests, 2308 * but the first one was already completed (thanks 2309 * preempt-to-busy!). Keep looking at the veng queue 2310 * until we have no more relevant requests (i.e. 2311 * the normal submit queue has higher priority). 2312 */ 2313 if (!submit) { 2314 spin_unlock(&ve->base.active.lock); 2315 rb = rb_first_cached(&execlists->virtual); 2316 continue; 2317 } 2318 } 2319 2320 spin_unlock(&ve->base.active.lock); 2321 break; 2322 } 2323 2324 while ((rb = rb_first_cached(&execlists->queue))) { 2325 struct i915_priolist *p = to_priolist(rb); 2326 struct i915_request *rq, *rn; 2327 int i; 2328 2329 priolist_for_each_request_consume(rq, rn, p, i) { 2330 bool merge = true; 2331 2332 /* 2333 * Can we combine this request with the current port? 2334 * It has to be the same context/ringbuffer and not 2335 * have any exceptions (e.g. GVT saying never to 2336 * combine contexts). 2337 * 2338 * If we can combine the requests, we can execute both 2339 * by updating the RING_TAIL to point to the end of the 2340 * second request, and so we never need to tell the 2341 * hardware about the first. 2342 */ 2343 if (last && !can_merge_rq(last, rq)) { 2344 /* 2345 * If we are on the second port and cannot 2346 * combine this request with the last, then we 2347 * are done. 2348 */ 2349 if (port == last_port) 2350 goto done; 2351 2352 /* 2353 * We must not populate both ELSP[] with the 2354 * same LRCA, i.e. we must submit 2 different 2355 * contexts if we submit 2 ELSP. 2356 */ 2357 if (last->context == rq->context) 2358 goto done; 2359 2360 if (i915_request_has_sentinel(last)) 2361 goto done; 2362 2363 /* 2364 * If GVT overrides us we only ever submit 2365 * port[0], leaving port[1] empty. Note that we 2366 * also have to be careful that we don't queue 2367 * the same context (even though a different 2368 * request) to the second port. 
2369 */ 2370 if (ctx_single_port_submission(last->context) || 2371 ctx_single_port_submission(rq->context)) 2372 goto done; 2373 2374 merge = false; 2375 } 2376 2377 if (__i915_request_submit(rq)) { 2378 if (!merge) { 2379 *port = execlists_schedule_in(last, port - execlists->pending); 2380 port++; 2381 last = NULL; 2382 } 2383 2384 GEM_BUG_ON(last && 2385 !can_merge_ctx(last->context, 2386 rq->context)); 2387 GEM_BUG_ON(last && 2388 i915_seqno_passed(last->fence.seqno, 2389 rq->fence.seqno)); 2390 2391 submit = true; 2392 last = rq; 2393 } 2394 } 2395 2396 rb_erase_cached(&p->node, &execlists->queue); 2397 i915_priolist_free(p); 2398 } 2399 2400 done: 2401 /* 2402 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. 2403 * 2404 * We choose the priority hint such that if we add a request of greater 2405 * priority than this, we kick the submission tasklet to decide on 2406 * the right order of submitting the requests to hardware. We must 2407 * also be prepared to reorder requests as they are in-flight on the 2408 * HW. We derive the priority hint then as the first "hole" in 2409 * the HW submission ports and if there are no available slots, 2410 * the priority of the lowest executing request, i.e. last. 2411 * 2412 * When we do receive a higher priority request ready to run from the 2413 * user, see queue_request(), the priority hint is bumped to that 2414 * request triggering preemption on the next dequeue (or subsequent 2415 * interrupt for secondary ports). 2416 */ 2417 execlists->queue_priority_hint = queue_prio(execlists); 2418 2419 if (submit) { 2420 *port = execlists_schedule_in(last, port - execlists->pending); 2421 execlists->switch_priority_hint = 2422 switch_prio(engine, *execlists->pending); 2423 2424 /* 2425 * Skip if we ended up with exactly the same set of requests, 2426 * e.g. 
trying to timeslice a pair of ordered contexts 2427 */ 2428 if (!memcmp(active, execlists->pending, 2429 (port - execlists->pending + 1) * sizeof(*port))) { 2430 do 2431 execlists_schedule_out(fetch_and_zero(port)); 2432 while (port-- != execlists->pending); 2433 2434 goto skip_submit; 2435 } 2436 clear_ports(port + 1, last_port - port); 2437 2438 WRITE_ONCE(execlists->yield, -1); 2439 set_preempt_timeout(engine, *active); 2440 execlists_submit_ports(engine); 2441 } else { 2442 start_timeslice(engine, execlists->queue_priority_hint); 2443 skip_submit: 2444 ring_set_paused(engine, 0); 2445 } 2446 } 2447 2448 static void 2449 cancel_port_requests(struct intel_engine_execlists * const execlists) 2450 { 2451 struct i915_request * const *port; 2452 2453 for (port = execlists->pending; *port; port++) 2454 execlists_schedule_out(*port); 2455 clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); 2456 2457 /* Mark the end of active before we overwrite *active */ 2458 for (port = xchg(&execlists->active, execlists->pending); *port; port++) 2459 execlists_schedule_out(*port); 2460 clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); 2461 2462 smp_wmb(); /* complete the seqlock for execlists_active() */ 2463 WRITE_ONCE(execlists->active, execlists->inflight); 2464 } 2465 2466 static inline void 2467 invalidate_csb_entries(const u64 *first, const u64 *last) 2468 { 2469 clflush((void *)first); 2470 clflush((void *)last); 2471 } 2472 2473 /* 2474 * Starting with Gen12, the status has a new format: 2475 * 2476 * bit 0: switched to new queue 2477 * bit 1: reserved 2478 * bit 2: semaphore wait mode (poll or signal), only valid when 2479 * switch detail is set to "wait on semaphore" 2480 * bits 3-5: engine class 2481 * bits 6-11: engine instance 2482 * bits 12-14: reserved 2483 * bits 15-25: sw context id of the lrc the GT switched to 2484 * bits 26-31: sw counter of the lrc the GT switched to 2485 * bits 32-35: context switch detail 2486 * - 0: ctx complete 2487 * - 1: wait on sync flip 2488 * - 2: wait on vblank 2489 * - 3: wait on scanline 2490 * - 4: wait on semaphore 2491 * - 5: context preempted (not on SEMAPHORE_WAIT or 2492 * WAIT_FOR_EVENT) 2493 * bit 36: reserved 2494 * bits 37-43: wait detail (for switch detail 1 to 4) 2495 * bits 44-46: reserved 2496 * bits 47-57: sw context id of the lrc the GT switched away from 2497 * bits 58-63: sw counter of the lrc the GT switched away from 2498 */ 2499 static inline bool gen12_csb_parse(const u64 csb) 2500 { 2501 bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb)); 2502 bool new_queue = 2503 lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; 2504 2505 /* 2506 * The context switch detail is not guaranteed to be 5 when a preemption 2507 * occurs, so we can't just check for that. The check below works for 2508 * all the cases we care about, including preemptions of WAIT 2509 * instructions and lite-restore. Preempt-to-idle via the CTRL register 2510 * would require some extra handling, but we don't support that. 2511 */ 2512 if (!ctx_away_valid || new_queue) { 2513 GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb))); 2514 return true; 2515 } 2516 2517 /* 2518 * switch detail = 5 is covered by the case above and we do not expect a 2519 * context switch on an unsuccessful wait instruction since we always 2520 * use polling mode. 
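 *
 * For reference, extracting the fields named in the layout above from
 * the 64b entry looks roughly like this (masks derived from that
 * layout; illustrative only):
 *
 *	sw_ctx_id     = (lower_32_bits(csb) >> 15) & 0x7ff;
 *	switch_detail = upper_32_bits(csb) & 0xf;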
2521 */ 2522 GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb))); 2523 return false; 2524 } 2525 2526 static inline bool gen8_csb_parse(const u64 csb) 2527 { 2528 return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); 2529 } 2530 2531 static noinline u64 wa_csb_read(u64 * const csb) 2532 { 2533 u64 entry; 2534 2535 preempt_disable(); 2536 if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 50)) 2537 GEM_WARN_ON("50us CSB timeout"); 2538 preempt_enable(); 2539 2540 return entry; 2541 } 2542 2543 static inline u64 csb_read(u64 * const csb) 2544 { 2545 u64 entry = READ_ONCE(*csb); 2546 2547 /* 2548 * Unfortunately, the GPU does not always serialise its write 2549 * of the CSB entries before its write of the CSB pointer, at least 2550 * from the perspective of the CPU, using what is known as a Global 2551 * Observation Point. We may read a new CSB tail pointer, but then 2552 * read the stale CSB entries, causing us to misinterpret the 2553 * context-switch events, and eventually declare the GPU hung. 2554 * 2555 * icl:HSDES#1806554093 2556 * tgl:HSDES#22011248461 2557 */ 2558 if (unlikely(entry == -1)) 2559 entry = wa_csb_read(csb); 2560 2561 /* Consume this entry so that we can spot its future reuse. */ 2562 WRITE_ONCE(*csb, -1); 2563 2564 /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */ 2565 return entry; 2566 } 2567 2568 static void process_csb(struct intel_engine_cs *engine) 2569 { 2570 struct intel_engine_execlists * const execlists = &engine->execlists; 2571 u64 * const buf = execlists->csb_status; 2572 const u8 num_entries = execlists->csb_size; 2573 u8 head, tail; 2574 2575 /* 2576 * As we modify our execlists state tracking we require exclusive 2577 * access. Either we are inside the tasklet, or the tasklet is disabled 2578 * and we assume that is only inside the reset paths and so serialised. 2579 */ 2580 GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && 2581 !reset_in_progress(execlists)); 2582 GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); 2583 2584 /* 2585 * Note that csb_write, csb_status may be either in HWSP or mmio. 2586 * When reading from the csb_write mmio register, we have to be 2587 * careful to only use the GEN8_CSB_WRITE_PTR portion, which is 2588 * the low 4bits. As it happens we know the next 4bits are always 2589 * zero and so we can simply use the low u8 of the register 2590 * and treat it identically to reading from the HWSP (without having 2591 * to use explicit shifting and masking, and probably bifurcating 2592 * the code to handle the legacy mmio read). 2593 */ 2594 head = execlists->csb_head; 2595 tail = READ_ONCE(*execlists->csb_write); 2596 if (unlikely(head == tail)) 2597 return; 2598 2599 /* 2600 * We will consume all events from HW, or at least pretend to. 2601 * 2602 * The sequence of events from the HW is deterministic, and derived 2603 * from our writes to the ELSP, with a smidgen of variability for 2604 * the arrival of the asynchronous requests wrt the inflight 2605 * execution. If the HW sends an event that does not correspond with 2606 * the one we are expecting, we have to abandon all hope as we lose 2607 * all tracking of what the engine is actually executing. We will 2608 * only detect we are out of sequence with the HW when we get an 2609 * 'impossible' event because we have already drained our own 2610 * preemption/promotion queue.
If this occurs, we know that we likely 2611 * lost track of execution earlier and must unwind and restart; the 2612 * simplest way is to stop processing the event queue and force the 2613 * engine to reset. 2614 */ 2615 execlists->csb_head = tail; 2616 ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); 2617 2618 /* 2619 * Hopefully paired with a wmb() in HW! 2620 * 2621 * We must complete the read of the write pointer before any reads 2622 * from the CSB, so that we do not see stale values. Without an rmb 2623 * (lfence) the HW may speculatively perform the CSB[] reads *before* 2624 * we perform the READ_ONCE(*csb_write). 2625 */ 2626 rmb(); 2627 do { 2628 bool promote; 2629 u64 csb; 2630 2631 if (++head == num_entries) 2632 head = 0; 2633 2634 /* 2635 * We are flying near dragons again. 2636 * 2637 * We hold a reference to the request in execlist_port[] 2638 * but no more than that. We are operating in softirq 2639 * context and so cannot hold any mutex or sleep. That 2640 * prevents us stopping the requests we are processing 2641 * in port[] from being retired simultaneously (the 2642 * breadcrumb will be complete before we see the 2643 * context-switch). As we only hold the reference to the 2644 * request, any pointer chasing underneath the request 2645 * is subject to a potential use-after-free. Thus we 2646 * store all of the bookkeeping within port[] as 2647 * required, and avoid using unguarded pointers beneath 2648 * request itself. The same applies to the atomic 2649 * status notifier. 2650 */ 2651 2652 csb = csb_read(buf + head); 2653 ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", 2654 head, upper_32_bits(csb), lower_32_bits(csb)); 2655 2656 if (INTEL_GEN(engine->i915) >= 12) 2657 promote = gen12_csb_parse(csb); 2658 else 2659 promote = gen8_csb_parse(csb); 2660 if (promote) { 2661 struct i915_request * const *old = execlists->active; 2662 2663 if (GEM_WARN_ON(!*execlists->pending)) { 2664 execlists->error_interrupt |= ERROR_CSB; 2665 break; 2666 } 2667 2668 ring_set_paused(engine, 0); 2669 2670 /* Point active to the new ELSP; prevent overwriting */ 2671 WRITE_ONCE(execlists->active, execlists->pending); 2672 smp_wmb(); /* notify execlists_active() */ 2673 2674 /* cancel old inflight, prepare for switch */ 2675 trace_ports(execlists, "preempted", old); 2676 while (*old) 2677 execlists_schedule_out(*old++); 2678 2679 /* switch pending to inflight */ 2680 GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); 2681 copy_ports(execlists->inflight, 2682 execlists->pending, 2683 execlists_num_ports(execlists)); 2684 smp_wmb(); /* complete the seqlock */ 2685 WRITE_ONCE(execlists->active, execlists->inflight); 2686 2687 WRITE_ONCE(execlists->pending[0], NULL); 2688 } else { 2689 if (GEM_WARN_ON(!*execlists->active)) { 2690 execlists->error_interrupt |= ERROR_CSB; 2691 break; 2692 } 2693 2694 /* port0 completed, advanced to port1 */ 2695 trace_ports(execlists, "completed", execlists->active); 2696 2697 /* 2698 * We rely on the hardware being strongly 2699 * ordered, that the breadcrumb write is 2700 * coherent (visible from the CPU) before the 2701 * user interrupt is processed. One might assume 2702 * that the breadcrumb write being before the 2703 * user interrupt and the CS event for the context 2704 * switch would therefore be before the CS event 2705 * itself...
2706 */ 2707 if (GEM_SHOW_DEBUG() && 2708 !i915_request_completed(*execlists->active)) { 2709 struct i915_request *rq = *execlists->active; 2710 const u32 *regs __maybe_unused = 2711 rq->context->lrc_reg_state; 2712 2713 ENGINE_TRACE(engine, 2714 "context completed before request!\n"); 2715 ENGINE_TRACE(engine, 2716 "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", 2717 ENGINE_READ(engine, RING_START), 2718 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR, 2719 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR, 2720 ENGINE_READ(engine, RING_CTL), 2721 ENGINE_READ(engine, RING_MI_MODE)); 2722 ENGINE_TRACE(engine, 2723 "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ", 2724 i915_ggtt_offset(rq->ring->vma), 2725 rq->head, rq->tail, 2726 rq->fence.context, 2727 lower_32_bits(rq->fence.seqno), 2728 hwsp_seqno(rq)); 2729 ENGINE_TRACE(engine, 2730 "ctx:{start:%08x, head:%04x, tail:%04x}, ", 2731 regs[CTX_RING_START], 2732 regs[CTX_RING_HEAD], 2733 regs[CTX_RING_TAIL]); 2734 } 2735 2736 execlists_schedule_out(*execlists->active++); 2737 2738 GEM_BUG_ON(execlists->active - execlists->inflight > 2739 execlists_num_ports(execlists)); 2740 } 2741 } while (head != tail); 2742 2743 set_timeslice(engine); 2744 2745 /* 2746 * Gen11 has proven to fail wrt global observation point between 2747 * entry and tail update, failing on the ordering and thus 2748 * we see an old entry in the context status buffer. 2749 * 2750 * Forcibly evict the entries for the next gpu csb update, 2751 * to increase the odds that we get fresh entries even with 2752 * non-working hardware. The cost of doing so comes out mostly in 2753 * the wash, as hardware, working or not, will need to do the 2754 * invalidation beforehand. 2755 */ 2756 invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); 2757 } 2758 2759 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) 2760 { 2761 lockdep_assert_held(&engine->active.lock); 2762 if (!READ_ONCE(engine->execlists.pending[0])) { 2763 rcu_read_lock(); /* protect peeking at execlists->active */ 2764 execlists_dequeue(engine); 2765 rcu_read_unlock(); 2766 } 2767 } 2768 2769 static void __execlists_hold(struct i915_request *rq) 2770 { 2771 LIST_HEAD(list); 2772 2773 do { 2774 struct i915_dependency *p; 2775 2776 if (i915_request_is_active(rq)) 2777 __i915_request_unsubmit(rq); 2778 2779 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 2780 list_move_tail(&rq->sched.link, &rq->engine->active.hold); 2781 i915_request_set_hold(rq); 2782 RQ_TRACE(rq, "on hold\n"); 2783 2784 for_each_waiter(p, rq) { 2785 struct i915_request *w = 2786 container_of(p->waiter, typeof(*w), sched); 2787 2788 /* Leave semaphores spinning on the other engines */ 2789 if (w->engine != rq->engine) 2790 continue; 2791 2792 if (!i915_request_is_ready(w)) 2793 continue; 2794 2795 if (i915_request_completed(w)) 2796 continue; 2797 2798 if (i915_request_on_hold(w)) 2799 continue; 2800 2801 list_move_tail(&w->sched.link, &list); 2802 } 2803 2804 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); 2805 } while (rq); 2806 } 2807 2808 static bool execlists_hold(struct intel_engine_cs *engine, 2809 struct i915_request *rq) 2810 { 2811 spin_lock_irq(&engine->active.lock); 2812 2813 if (i915_request_completed(rq)) { /* too late!
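-- the request has already completed and been signalled, so there is nothing left to hold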
*/ 2814 rq = NULL; 2815 goto unlock; 2816 } 2817 2818 if (rq->engine != engine) { /* preempted virtual engine */ 2819 struct virtual_engine *ve = to_virtual_engine(rq->engine); 2820 2821 /* 2822 * intel_context_inflight() is only protected by virtue 2823 * of process_csb() being called only by the tasklet (or 2824 * directly from inside reset while the tasklet is suspended). 2825 * Assert that neither of those is allowed to run while we 2826 * poke at the request queues. 2827 */ 2828 GEM_BUG_ON(!reset_in_progress(&engine->execlists)); 2829 2830 /* 2831 * An unsubmitted request along a virtual engine will 2832 * remain on the active (this) engine until we are able 2833 * to process the context switch away (and so mark the 2834 * context as no longer in flight). That cannot have happened 2835 * yet, otherwise we would not be hanging! 2836 */ 2837 spin_lock(&ve->base.active.lock); 2838 GEM_BUG_ON(intel_context_inflight(rq->context) != engine); 2839 GEM_BUG_ON(ve->request != rq); 2840 ve->request = NULL; 2841 spin_unlock(&ve->base.active.lock); 2842 i915_request_put(rq); 2843 2844 rq->engine = engine; 2845 } 2846 2847 /* 2848 * Transfer this request onto the hold queue to prevent it 2849 * being resubmitted to HW (and potentially completed) before we have 2850 * released it. Since we may have already submitted following 2851 * requests, we need to remove those as well. 2852 */ 2853 GEM_BUG_ON(i915_request_on_hold(rq)); 2854 GEM_BUG_ON(rq->engine != engine); 2855 __execlists_hold(rq); 2856 GEM_BUG_ON(list_empty(&engine->active.hold)); 2857 2858 unlock: 2859 spin_unlock_irq(&engine->active.lock); 2860 return rq; 2861 } 2862 2863 static bool hold_request(const struct i915_request *rq) 2864 { 2865 struct i915_dependency *p; 2866 bool result = false; 2867 2868 /* 2869 * If one of our ancestors is on hold, we must also be on hold, 2870 * otherwise we will bypass it and execute before it.
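 *
 * (Inspecting only our immediate same-engine signalers suffices:
 * __execlists_hold() propagates holds down the waiter tree, so an
 * on-hold ancestor further up the chain should already have marked
 * the signaler we examine here.)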
2871 */ 2872 rcu_read_lock(); 2873 for_each_signaler(p, rq) { 2874 const struct i915_request *s = 2875 container_of(p->signaler, typeof(*s), sched); 2876 2877 if (s->engine != rq->engine) 2878 continue; 2879 2880 result = i915_request_on_hold(s); 2881 if (result) 2882 break; 2883 } 2884 rcu_read_unlock(); 2885 2886 return result; 2887 } 2888 2889 static void __execlists_unhold(struct i915_request *rq) 2890 { 2891 LIST_HEAD(list); 2892 2893 do { 2894 struct i915_dependency *p; 2895 2896 RQ_TRACE(rq, "hold release\n"); 2897 2898 GEM_BUG_ON(!i915_request_on_hold(rq)); 2899 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); 2900 2901 i915_request_clear_hold(rq); 2902 list_move_tail(&rq->sched.link, 2903 i915_sched_lookup_priolist(rq->engine, 2904 rq_prio(rq))); 2905 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 2906 2907 /* Also release any children on this engine that are ready */ 2908 for_each_waiter(p, rq) { 2909 struct i915_request *w = 2910 container_of(p->waiter, typeof(*w), sched); 2911 2912 /* Propagate any change in error status */ 2913 if (rq->fence.error) 2914 i915_request_set_error_once(w, rq->fence.error); 2915 2916 if (w->engine != rq->engine) 2917 continue; 2918 2919 if (!i915_request_on_hold(w)) 2920 continue; 2921 2922 /* Check that no other parents are also on hold */ 2923 if (hold_request(w)) 2924 continue; 2925 2926 list_move_tail(&w->sched.link, &list); 2927 } 2928 2929 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); 2930 } while (rq); 2931 } 2932 2933 static void execlists_unhold(struct intel_engine_cs *engine, 2934 struct i915_request *rq) 2935 { 2936 spin_lock_irq(&engine->active.lock); 2937 2938 /* 2939 * Move this request back to the priority queue, and all of its 2940 * children and grandchildren that were suspended along with it. 2941 */ 2942 __execlists_unhold(rq); 2943 2944 if (rq_prio(rq) > engine->execlists.queue_priority_hint) { 2945 engine->execlists.queue_priority_hint = rq_prio(rq); 2946 tasklet_hi_schedule(&engine->execlists.tasklet); 2947 } 2948 2949 spin_unlock_irq(&engine->active.lock); 2950 } 2951 2952 struct execlists_capture { 2953 struct work_struct work; 2954 struct i915_request *rq; 2955 struct i915_gpu_coredump *error; 2956 }; 2957 2958 static void execlists_capture_work(struct work_struct *work) 2959 { 2960 struct execlists_capture *cap = container_of(work, typeof(*cap), work); 2961 const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; 2962 struct intel_engine_cs *engine = cap->rq->engine; 2963 struct intel_gt_coredump *gt = cap->error->gt; 2964 struct intel_engine_capture_vma *vma; 2965 2966 /* Compress all the objects attached to the request, slow! 
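It may block allocating memory for the compressed copies, which is why it runs from a worker rather than from the softirq that froze the engine.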
*/ 2967 vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp); 2968 if (vma) { 2969 struct i915_vma_compress *compress = 2970 i915_vma_capture_prepare(gt); 2971 2972 intel_engine_coredump_add_vma(gt->engine, vma, compress); 2973 i915_vma_capture_finish(gt, compress); 2974 } 2975 2976 gt->simulated = gt->engine->simulated; 2977 cap->error->simulated = gt->simulated; 2978 2979 /* Publish the error state, and announce it to the world */ 2980 i915_error_state_store(cap->error); 2981 i915_gpu_coredump_put(cap->error); 2982 2983 /* Return this request and all that depend upon it for signaling */ 2984 execlists_unhold(engine, cap->rq); 2985 i915_request_put(cap->rq); 2986 2987 kfree(cap); 2988 } 2989 2990 static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) 2991 { 2992 const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; 2993 struct execlists_capture *cap; 2994 2995 cap = kmalloc(sizeof(*cap), gfp); 2996 if (!cap) 2997 return NULL; 2998 2999 cap->error = i915_gpu_coredump_alloc(engine->i915, gfp); 3000 if (!cap->error) 3001 goto err_cap; 3002 3003 cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp); 3004 if (!cap->error->gt) 3005 goto err_gpu; 3006 3007 cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp); 3008 if (!cap->error->gt->engine) 3009 goto err_gt; 3010 3011 return cap; 3012 3013 err_gt: 3014 kfree(cap->error->gt); 3015 err_gpu: 3016 kfree(cap->error); 3017 err_cap: 3018 kfree(cap); 3019 return NULL; 3020 } 3021 3022 static struct i915_request * 3023 active_context(struct intel_engine_cs *engine, u32 ccid) 3024 { 3025 const struct intel_engine_execlists * const el = &engine->execlists; 3026 struct i915_request * const *port, *rq; 3027 3028 /* 3029 * Use the most recent result from process_csb(), but just in case 3030 * we trigger an error (via interrupt) before the first CS event has 3031 * been written, peek at the next submission. 3032 */ 3033 3034 for (port = el->active; (rq = *port); port++) { 3035 if (rq->context->lrc.ccid == ccid) { 3036 ENGINE_TRACE(engine, 3037 "ccid found at active:%zd\n", 3038 port - el->active); 3039 return rq; 3040 } 3041 } 3042 3043 for (port = el->pending; (rq = *port); port++) { 3044 if (rq->context->lrc.ccid == ccid) { 3045 ENGINE_TRACE(engine, 3046 "ccid found at pending:%zd\n", 3047 port - el->pending); 3048 return rq; 3049 } 3050 } 3051 3052 ENGINE_TRACE(engine, "ccid:%x not found\n", ccid); 3053 return NULL; 3054 } 3055 3056 static u32 active_ccid(struct intel_engine_cs *engine) 3057 { 3058 return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); 3059 } 3060 3061 static void execlists_capture(struct intel_engine_cs *engine) 3062 { 3063 struct execlists_capture *cap; 3064 3065 if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) 3066 return; 3067 3068 /* 3069 * We need to _quickly_ capture the engine state before we reset. 3070 * We are inside an atomic section (softirq) here and we are delaying 3071 * the forced preemption event. 3072 */ 3073 cap = capture_regs(engine); 3074 if (!cap) 3075 return; 3076 3077 spin_lock_irq(&engine->active.lock); 3078 cap->rq = active_context(engine, active_ccid(engine)); 3079 if (cap->rq) { 3080 cap->rq = active_request(cap->rq->context->timeline, cap->rq); 3081 cap->rq = i915_request_get_rcu(cap->rq); 3082 } 3083 spin_unlock_irq(&engine->active.lock); 3084 if (!cap->rq) 3085 goto err_free; 3086 3087 /* 3088 * Remove the request from the execlists queue, and take ownership 3089 * of the request. 
We pass it to our worker who will _slowly_ compress 3090 * all the pages the _user_ requested for debugging their batch, after 3091 * which we return it to the queue for signaling. 3092 * 3093 * By removing them from the execlists queue, we also remove the 3094 * requests from being processed by __unwind_incomplete_requests() 3095 * during the intel_engine_reset(), and so they will *not* be replayed 3096 * afterwards. 3097 * 3098 * Note that because we have not yet reset the engine at this point, 3099 * it is possible that the request we have identified as being 3100 * guilty did in fact complete, and we will then hit an arbitration 3101 * point allowing the outstanding preemption to succeed. The likelihood 3102 * of that is very low (as capturing of the engine registers should be 3103 * fast enough to run inside an irq-off atomic section!), so we will 3104 * simply hold that request accountable for being non-preemptible 3105 * long enough to force the reset. 3106 */ 3107 if (!execlists_hold(engine, cap->rq)) 3108 goto err_rq; 3109 3110 INIT_WORK(&cap->work, execlists_capture_work); 3111 schedule_work(&cap->work); 3112 return; 3113 3114 err_rq: 3115 i915_request_put(cap->rq); 3116 err_free: 3117 i915_gpu_coredump_put(cap->error); 3118 kfree(cap); 3119 } 3120 3121 static void execlists_reset(struct intel_engine_cs *engine, const char *msg) 3122 { 3123 const unsigned int bit = I915_RESET_ENGINE + engine->id; 3124 unsigned long *lock = &engine->gt->reset.flags; 3125 3126 if (!intel_has_reset_engine(engine->gt)) 3127 return; 3128 3129 if (test_and_set_bit(bit, lock)) 3130 return; 3131 3132 ENGINE_TRACE(engine, "reset for %s\n", msg); 3133 3134 /* Mark this tasklet as disabled to avoid waiting for it to complete */ 3135 tasklet_disable_nosync(&engine->execlists.tasklet); 3136 3137 ring_set_paused(engine, 1); /* Freeze the current request in place */ 3138 execlists_capture(engine); 3139 intel_engine_reset(engine, msg); 3140 3141 tasklet_enable(&engine->execlists.tasklet); 3142 clear_and_wake_up_bit(bit, lock); 3143 } 3144 3145 static bool preempt_timeout(const struct intel_engine_cs *const engine) 3146 { 3147 const struct timer_list *t = &engine->execlists.preempt; 3148 3149 if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) 3150 return false; 3151 3152 if (!timer_expired(t)) 3153 return false; 3154 3155 return READ_ONCE(engine->execlists.pending[0]); 3156 } 3157 3158 /* 3159 * Check the unread Context Status Buffers and manage the submission of new 3160 * contexts to the ELSP accordingly. 3161 */ 3162 static void execlists_submission_tasklet(unsigned long data) 3163 { 3164 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 3165 bool timeout = preempt_timeout(engine); 3166 3167 process_csb(engine); 3168 3169 if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { 3170 const char *msg; 3171 3172 /* Generate the error message in priority order wrt the user!
*/ 3173 if (engine->execlists.error_interrupt & GENMASK(15, 0)) 3174 msg = "CS error"; /* thrown by a user payload */ 3175 else if (engine->execlists.error_interrupt & ERROR_CSB) 3176 msg = "invalid CSB event"; 3177 else 3178 msg = "internal error"; 3179 3180 engine->execlists.error_interrupt = 0; 3181 execlists_reset(engine, msg); 3182 } 3183 3184 if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { 3185 unsigned long flags; 3186 3187 spin_lock_irqsave(&engine->active.lock, flags); 3188 __execlists_submission_tasklet(engine); 3189 spin_unlock_irqrestore(&engine->active.lock, flags); 3190 3191 /* Recheck after serialising with direct-submission */ 3192 if (unlikely(timeout && preempt_timeout(engine))) 3193 execlists_reset(engine, "preemption time out"); 3194 } 3195 } 3196 3197 static void __execlists_kick(struct intel_engine_execlists *execlists) 3198 { 3199 /* Kick the tasklet for some interrupt coalescing and reset handling */ 3200 tasklet_hi_schedule(&execlists->tasklet); 3201 } 3202 3203 #define execlists_kick(t, member) \ 3204 __execlists_kick(container_of(t, struct intel_engine_execlists, member)) 3205 3206 static void execlists_timeslice(struct timer_list *timer) 3207 { 3208 execlists_kick(timer, timer); 3209 } 3210 3211 static void execlists_preempt(struct timer_list *timer) 3212 { 3213 execlists_kick(timer, preempt); 3214 } 3215 3216 static void queue_request(struct intel_engine_cs *engine, 3217 struct i915_request *rq) 3218 { 3219 GEM_BUG_ON(!list_empty(&rq->sched.link)); 3220 list_add_tail(&rq->sched.link, 3221 i915_sched_lookup_priolist(engine, rq_prio(rq))); 3222 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); 3223 } 3224 3225 static void __submit_queue_imm(struct intel_engine_cs *engine) 3226 { 3227 struct intel_engine_execlists * const execlists = &engine->execlists; 3228 3229 if (reset_in_progress(execlists)) 3230 return; /* defer until we restart the engine following reset */ 3231 3232 __execlists_submission_tasklet(engine); 3233 } 3234 3235 static void submit_queue(struct intel_engine_cs *engine, 3236 const struct i915_request *rq) 3237 { 3238 struct intel_engine_execlists *execlists = &engine->execlists; 3239 3240 if (rq_prio(rq) <= execlists->queue_priority_hint) 3241 return; 3242 3243 execlists->queue_priority_hint = rq_prio(rq); 3244 __submit_queue_imm(engine); 3245 } 3246 3247 static bool ancestor_on_hold(const struct intel_engine_cs *engine, 3248 const struct i915_request *rq) 3249 { 3250 GEM_BUG_ON(i915_request_on_hold(rq)); 3251 return !list_empty(&engine->active.hold) && hold_request(rq); 3252 } 3253 3254 static void flush_csb(struct intel_engine_cs *engine) 3255 { 3256 struct intel_engine_execlists *el = &engine->execlists; 3257 3258 if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) { 3259 if (!reset_in_progress(el)) 3260 process_csb(engine); 3261 tasklet_unlock(&el->tasklet); 3262 } 3263 } 3264 3265 static void execlists_submit_request(struct i915_request *request) 3266 { 3267 struct intel_engine_cs *engine = request->engine; 3268 unsigned long flags; 3269 3270 /* Hopefully we clear execlists->pending[] to let us through */ 3271 flush_csb(engine); 3272 3273 /* Will be called from irq-context when using foreign fences. 
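Hence the irqsave variant below rather than plain spin_lock_irq().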
*/ 3274 spin_lock_irqsave(&engine->active.lock, flags); 3275 3276 if (unlikely(ancestor_on_hold(engine, request))) { 3277 RQ_TRACE(request, "ancestor on hold\n"); 3278 list_add_tail(&request->sched.link, &engine->active.hold); 3279 i915_request_set_hold(request); 3280 } else { 3281 queue_request(engine, request); 3282 3283 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); 3284 GEM_BUG_ON(list_empty(&request->sched.link)); 3285 3286 submit_queue(engine, request); 3287 } 3288 3289 spin_unlock_irqrestore(&engine->active.lock, flags); 3290 } 3291 3292 static void __execlists_context_fini(struct intel_context *ce) 3293 { 3294 intel_ring_put(ce->ring); 3295 i915_vma_put(ce->state); 3296 } 3297 3298 static void execlists_context_destroy(struct kref *kref) 3299 { 3300 struct intel_context *ce = container_of(kref, typeof(*ce), ref); 3301 3302 GEM_BUG_ON(!i915_active_is_idle(&ce->active)); 3303 GEM_BUG_ON(intel_context_is_pinned(ce)); 3304 3305 if (ce->state) 3306 __execlists_context_fini(ce); 3307 3308 intel_context_fini(ce); 3309 intel_context_free(ce); 3310 } 3311 3312 static void 3313 set_redzone(void *vaddr, const struct intel_engine_cs *engine) 3314 { 3315 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 3316 return; 3317 3318 vaddr += engine->context_size; 3319 3320 memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE); 3321 } 3322 3323 static void 3324 check_redzone(const void *vaddr, const struct intel_engine_cs *engine) 3325 { 3326 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 3327 return; 3328 3329 vaddr += engine->context_size; 3330 3331 if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) 3332 drm_err_once(&engine->i915->drm, 3333 "%s context redzone overwritten!\n", 3334 engine->name); 3335 } 3336 3337 static void execlists_context_unpin(struct intel_context *ce) 3338 { 3339 check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET, 3340 ce->engine); 3341 } 3342 3343 static void execlists_context_post_unpin(struct intel_context *ce) 3344 { 3345 i915_gem_object_unpin_map(ce->state->obj); 3346 } 3347 3348 static u32 * 3349 gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs) 3350 { 3351 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 3352 MI_SRM_LRM_GLOBAL_GTT | 3353 MI_LRI_LRM_CS_MMIO; 3354 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3355 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + 3356 CTX_TIMESTAMP * sizeof(u32); 3357 *cs++ = 0; 3358 3359 *cs++ = MI_LOAD_REGISTER_REG | 3360 MI_LRR_SOURCE_CS_MMIO | 3361 MI_LRI_LRM_CS_MMIO; 3362 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3363 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0)); 3364 3365 *cs++ = MI_LOAD_REGISTER_REG | 3366 MI_LRR_SOURCE_CS_MMIO | 3367 MI_LRI_LRM_CS_MMIO; 3368 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3369 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0)); 3370 3371 return cs; 3372 } 3373 3374 static u32 * 3375 gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs) 3376 { 3377 GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1); 3378 3379 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 3380 MI_SRM_LRM_GLOBAL_GTT | 3381 MI_LRI_LRM_CS_MMIO; 3382 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3383 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + 3384 (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32); 3385 *cs++ = 0; 3386 3387 return cs; 3388 } 3389 3390 static u32 * 3391 gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs) 3392 { 3393 GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1); 3394 3395 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | 3396 
MI_SRM_LRM_GLOBAL_GTT | 3397 MI_LRI_LRM_CS_MMIO; 3398 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3399 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET + 3400 (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32); 3401 *cs++ = 0; 3402 3403 *cs++ = MI_LOAD_REGISTER_REG | 3404 MI_LRR_SOURCE_CS_MMIO | 3405 MI_LRI_LRM_CS_MMIO; 3406 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0)); 3407 *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0)); 3408 3409 return cs; 3410 } 3411 3412 static u32 * 3413 gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) 3414 { 3415 cs = gen12_emit_timestamp_wa(ce, cs); 3416 cs = gen12_emit_cmd_buf_wa(ce, cs); 3417 cs = gen12_emit_restore_scratch(ce, cs); 3418 3419 return cs; 3420 } 3421 3422 static u32 * 3423 gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) 3424 { 3425 cs = gen12_emit_timestamp_wa(ce, cs); 3426 cs = gen12_emit_restore_scratch(ce, cs); 3427 3428 return cs; 3429 } 3430 3431 static inline u32 context_wa_bb_offset(const struct intel_context *ce) 3432 { 3433 return PAGE_SIZE * ce->wa_bb_page; 3434 } 3435 3436 static u32 *context_indirect_bb(const struct intel_context *ce) 3437 { 3438 void *ptr; 3439 3440 GEM_BUG_ON(!ce->wa_bb_page); 3441 3442 ptr = ce->lrc_reg_state; 3443 ptr -= LRC_STATE_OFFSET; /* back to start of context image */ 3444 ptr += context_wa_bb_offset(ce); 3445 3446 return ptr; 3447 } 3448 3449 static void 3450 setup_indirect_ctx_bb(const struct intel_context *ce, 3451 const struct intel_engine_cs *engine, 3452 u32 *(*emit)(const struct intel_context *, u32 *)) 3453 { 3454 u32 * const start = context_indirect_bb(ce); 3455 u32 *cs; 3456 3457 cs = emit(ce, start); 3458 GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs)); 3459 while ((unsigned long)cs % CACHELINE_BYTES) 3460 *cs++ = MI_NOOP; 3461 3462 lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine, 3463 i915_ggtt_offset(ce->state) + 3464 context_wa_bb_offset(ce), 3465 (cs - start) * sizeof(*cs)); 3466 } 3467 3468 static void 3469 __execlists_update_reg_state(const struct intel_context *ce, 3470 const struct intel_engine_cs *engine, 3471 u32 head) 3472 { 3473 struct intel_ring *ring = ce->ring; 3474 u32 *regs = ce->lrc_reg_state; 3475 3476 GEM_BUG_ON(!intel_ring_offset_valid(ring, head)); 3477 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); 3478 3479 regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); 3480 regs[CTX_RING_HEAD] = head; 3481 regs[CTX_RING_TAIL] = ring->tail; 3482 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; 3483 3484 /* RPCS */ 3485 if (engine->class == RENDER_CLASS) { 3486 regs[CTX_R_PWR_CLK_STATE] = 3487 intel_sseu_make_rpcs(engine->gt, &ce->sseu); 3488 3489 i915_oa_init_reg_state(ce, engine); 3490 } 3491 3492 if (ce->wa_bb_page) { 3493 u32 *(*fn)(const struct intel_context *ce, u32 *cs); 3494 3495 fn = gen12_emit_indirect_ctx_xcs; 3496 if (ce->engine->class == RENDER_CLASS) 3497 fn = gen12_emit_indirect_ctx_rcs; 3498 3499 /* Mutually exclusive wrt to global indirect bb */ 3500 GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size); 3501 setup_indirect_ctx_bb(ce, engine, fn); 3502 } 3503 } 3504 3505 static int 3506 execlists_context_pre_pin(struct intel_context *ce, 3507 struct i915_gem_ww_ctx *ww, void **vaddr) 3508 { 3509 GEM_BUG_ON(!ce->state); 3510 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 3511 3512 *vaddr = i915_gem_object_pin_map(ce->state->obj, 3513 i915_coherent_map_type(ce->engine->i915) | 3514 I915_MAP_OVERRIDE); 3515 3516 return PTR_ERR_OR_ZERO(*vaddr); 3517 } 3518 3519 static int 3520 
__execlists_context_pin(struct intel_context *ce, 3521 struct intel_engine_cs *engine, 3522 void *vaddr) 3523 { 3524 ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; 3525 ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; 3526 __execlists_update_reg_state(ce, engine, ce->ring->tail); 3527 3528 return 0; 3529 } 3530 3531 static int execlists_context_pin(struct intel_context *ce, void *vaddr) 3532 { 3533 return __execlists_context_pin(ce, ce->engine, vaddr); 3534 } 3535 3536 static int execlists_context_alloc(struct intel_context *ce) 3537 { 3538 return __execlists_context_alloc(ce, ce->engine); 3539 } 3540 3541 static void execlists_context_reset(struct intel_context *ce) 3542 { 3543 CE_TRACE(ce, "reset\n"); 3544 GEM_BUG_ON(!intel_context_is_pinned(ce)); 3545 3546 intel_ring_reset(ce->ring, ce->ring->emit); 3547 3548 /* Scrub away the garbage */ 3549 execlists_init_reg_state(ce->lrc_reg_state, 3550 ce, ce->engine, ce->ring, true); 3551 __execlists_update_reg_state(ce, ce->engine, ce->ring->tail); 3552 3553 ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; 3554 } 3555 3556 static const struct intel_context_ops execlists_context_ops = { 3557 .alloc = execlists_context_alloc, 3558 3559 .pre_pin = execlists_context_pre_pin, 3560 .pin = execlists_context_pin, 3561 .unpin = execlists_context_unpin, 3562 .post_unpin = execlists_context_post_unpin, 3563 3564 .enter = intel_context_enter_engine, 3565 .exit = intel_context_exit_engine, 3566 3567 .reset = execlists_context_reset, 3568 .destroy = execlists_context_destroy, 3569 }; 3570 3571 static int gen8_emit_init_breadcrumb(struct i915_request *rq) 3572 { 3573 u32 *cs; 3574 3575 GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq)); 3576 if (!i915_request_timeline(rq)->has_initial_breadcrumb) 3577 return 0; 3578 3579 cs = intel_ring_begin(rq, 6); 3580 if (IS_ERR(cs)) 3581 return PTR_ERR(cs); 3582 3583 /* 3584 * Check if we have been preempted before we even get started. 3585 * 3586 * After this point i915_request_started() reports true, even if 3587 * we get preempted and so are no longer running. 3588 */ 3589 *cs++ = MI_ARB_CHECK; 3590 *cs++ = MI_NOOP; 3591 3592 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 3593 *cs++ = i915_request_timeline(rq)->hwsp_offset; 3594 *cs++ = 0; 3595 *cs++ = rq->fence.seqno - 1; 3596 3597 intel_ring_advance(rq, cs); 3598 3599 /* Record the updated position of the request's payload */ 3600 rq->infix = intel_ring_offset(rq, cs); 3601 3602 __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags); 3603 3604 return 0; 3605 } 3606 3607 static int emit_pdps(struct i915_request *rq) 3608 { 3609 const struct intel_engine_cs * const engine = rq->engine; 3610 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm); 3611 int err, i; 3612 u32 *cs; 3613 3614 GEM_BUG_ON(intel_vgpu_active(rq->engine->i915)); 3615 3616 /* 3617 * Beware ye of the dragons, this sequence is magic! 3618 * 3619 * Small changes to this sequence can cause anything from 3620 * GPU hangs to forcewake errors and machine lockups! 3621 */ 3622 3623 /* Flush any residual operations from the context load */ 3624 err = engine->emit_flush(rq, EMIT_FLUSH); 3625 if (err) 3626 return err; 3627 3628 /* Magic required to prevent forcewake errors! 
*/ 3629 err = engine->emit_flush(rq, EMIT_INVALIDATE); 3630 if (err) 3631 return err; 3632 3633 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 3634 if (IS_ERR(cs)) 3635 return PTR_ERR(cs); 3636 3637 /* Ensure the LRI have landed before we invalidate & continue */ 3638 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 3639 for (i = GEN8_3LVL_PDPES; i--; ) { 3640 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 3641 u32 base = engine->mmio_base; 3642 3643 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 3644 *cs++ = upper_32_bits(pd_daddr); 3645 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 3646 *cs++ = lower_32_bits(pd_daddr); 3647 } 3648 *cs++ = MI_NOOP; 3649 3650 intel_ring_advance(rq, cs); 3651 3652 return 0; 3653 } 3654 3655 static int execlists_request_alloc(struct i915_request *request) 3656 { 3657 int ret; 3658 3659 GEM_BUG_ON(!intel_context_is_pinned(request->context)); 3660 3661 /* 3662 * Flush enough space to reduce the likelihood of waiting after 3663 * we start building the request - in which case we will just 3664 * have to repeat work. 3665 */ 3666 request->reserved_space += EXECLISTS_REQUEST_SIZE; 3667 3668 /* 3669 * Note that after this point, we have committed to using 3670 * this request as it is being used to both track the 3671 * state of engine initialisation and liveness of the 3672 * golden renderstate above. Think twice before you try 3673 * to cancel/unwind this request now. 3674 */ 3675 3676 if (!i915_vm_is_4lvl(request->context->vm)) { 3677 ret = emit_pdps(request); 3678 if (ret) 3679 return ret; 3680 } 3681 3682 /* Unconditionally invalidate GPU caches and TLBs. */ 3683 ret = request->engine->emit_flush(request, EMIT_INVALIDATE); 3684 if (ret) 3685 return ret; 3686 3687 request->reserved_space -= EXECLISTS_REQUEST_SIZE; 3688 return 0; 3689 } 3690 3691 /* 3692 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after 3693 * PIPE_CONTROL instruction. This is required for the flush to happen correctly 3694 * but there is a slight complication as this is applied in WA batch where the 3695 * values are only initialized once so we cannot take register value at the 3696 * beginning and reuse it further; hence we save its value to memory, upload a 3697 * constant value with bit21 set and then we restore it back with the saved value. 3698 * To simplify the WA, a constant value is formed by using the default value 3699 * of this register. This shouldn't be a problem because we are only modifying 3700 * it for a short period and this batch is non-preemptible. We can of course 3701 * use additional instructions that read the actual value of the register 3702 * at that time and set our bit of interest but it makes the WA complicated. 3703 * 3704 * This WA is also required for Gen9 so extracting as a function avoids 3705 * code duplication. 3706 */ 3707 static u32 * 3708 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) 3709 { 3710 /* NB no one else is allowed to scribble over scratch + 256!
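The emission below is the save/modify/restore sequence described in the comment above, in order:
 *
 *	SRM  GEN8_L3SQCREG4 -> scratch		(save the live value)
 *	LRI  GEN8_L3SQCREG4 <- default | GEN8_LQSC_FLUSH_COHERENT_LINES
 *	PIPE_CONTROL (CS stall, DC flush)
 *	LRM  scratch -> GEN8_L3SQCREG4		(restore the saved value)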
*/ 3711 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; 3712 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); 3713 *batch++ = intel_gt_scratch_offset(engine->gt, 3714 INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); 3715 *batch++ = 0; 3716 3717 *batch++ = MI_LOAD_REGISTER_IMM(1); 3718 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); 3719 *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES; 3720 3721 batch = gen8_emit_pipe_control(batch, 3722 PIPE_CONTROL_CS_STALL | 3723 PIPE_CONTROL_DC_FLUSH_ENABLE, 3724 0); 3725 3726 *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; 3727 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4); 3728 *batch++ = intel_gt_scratch_offset(engine->gt, 3729 INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA); 3730 *batch++ = 0; 3731 3732 return batch; 3733 } 3734 3735 /* 3736 * Typically we only have one indirect_ctx and per_ctx batch buffer which are 3737 * initialized at the beginning and shared across all contexts but this field 3738 * helps us to have multiple batches at different offsets and select them based 3739 * on a criteria. At the moment this batch always starts at the beginning of the page 3740 * and at this point we don't have multiple wa_ctx batch buffers. 3741 * 3742 * The number of WAs applied is not known at the beginning; we use this field 3743 * to return the number of DWORDS written. 3744 * 3745 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END 3746 * so it adds NOOPs as padding to make it cacheline aligned. 3747 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together 3748 * make a complete batch buffer. 3749 */ 3750 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 3751 { 3752 /* WaDisableCtxRestoreArbitration:bdw,chv */ 3753 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 3754 3755 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 3756 if (IS_BROADWELL(engine->i915)) 3757 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 3758 3759 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ 3760 /* Actual scratch location is at 128 bytes offset */ 3761 batch = gen8_emit_pipe_control(batch, 3762 PIPE_CONTROL_FLUSH_L3 | 3763 PIPE_CONTROL_STORE_DATA_INDEX | 3764 PIPE_CONTROL_CS_STALL | 3765 PIPE_CONTROL_QW_WRITE, 3766 LRC_PPHWSP_SCRATCH_ADDR); 3767 3768 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 3769 3770 /* Pad to end of cacheline */ 3771 while ((unsigned long)batch % CACHELINE_BYTES) 3772 *batch++ = MI_NOOP; 3773 3774 /* 3775 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because 3776 * execution depends on the length specified in terms of cache lines 3777 * in the register CTX_RCS_INDIRECT_CTX 3778 */ 3779 3780 return batch; 3781 } 3782 3783 struct lri { 3784 i915_reg_t reg; 3785 u32 value; 3786 }; 3787 3788 static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count) 3789 { 3790 GEM_BUG_ON(!count || count > 63); 3791 3792 *batch++ = MI_LOAD_REGISTER_IMM(count); 3793 do { 3794 *batch++ = i915_mmio_reg_offset(lri->reg); 3795 *batch++ = lri->value; 3796 } while (lri++, --count); 3797 *batch++ = MI_NOOP; 3798 3799 return batch; 3800 } 3801 3802 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 3803 { 3804 static const struct lri lri[] = { 3805 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 3806 { 3807 COMMON_SLICE_CHICKEN2, 3808 __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE, 3809 0), 3810 }, 3811 3812 /* BSpec: 11391 */ 3813 { 3814 FF_SLICE_CHICKEN,
__MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX, 3816 FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX), 3817 }, 3818 3819 /* BSpec: 11299 */ 3820 { 3821 _3D_CHICKEN3, 3822 __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX, 3823 _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX), 3824 } 3825 }; 3826 3827 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 3828 3829 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 3830 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 3831 3832 /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */ 3833 batch = gen8_emit_pipe_control(batch, 3834 PIPE_CONTROL_FLUSH_L3 | 3835 PIPE_CONTROL_STORE_DATA_INDEX | 3836 PIPE_CONTROL_CS_STALL | 3837 PIPE_CONTROL_QW_WRITE, 3838 LRC_PPHWSP_SCRATCH_ADDR); 3839 3840 batch = emit_lri(batch, lri, ARRAY_SIZE(lri)); 3841 3842 /* WaMediaPoolStateCmdInWABB:bxt,glk */ 3843 if (HAS_POOLED_EU(engine->i915)) { 3844 /* 3845 * EU pool configuration is set up along with the golden context 3846 * during context initialization. This value depends on the 3847 * device type (2x6 or 3x6) and needs to be updated based 3848 * on which subslice is disabled, especially for 2x6 3849 * devices. However, it is safe to load the default 3850 * configuration of a 3x6 device instead of masking off the 3851 * corresponding bits, because the HW ignores bits of a disabled 3852 * subslice and drops down to the appropriate config. Please 3853 * see render_state_setup() in i915_gem_render_state.c for the 3854 * possible configurations; to avoid duplication they are 3855 * not shown here again. 3856 */ 3857 *batch++ = GEN9_MEDIA_POOL_STATE; 3858 *batch++ = GEN9_MEDIA_POOL_ENABLE; 3859 *batch++ = 0x00777000; 3860 *batch++ = 0; 3861 *batch++ = 0; 3862 *batch++ = 0; 3863 } 3864 3865 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 3866 3867 /* Pad to end of cacheline */ 3868 while ((unsigned long)batch % CACHELINE_BYTES) 3869 *batch++ = MI_NOOP; 3870 3871 return batch; 3872 } 3873 3874 static u32 * 3875 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) 3876 { 3877 int i; 3878 3879 /* 3880 * WaPipeControlBefore3DStateSamplePattern: cnl 3881 * 3882 * Ensure the engine is idle prior to programming a 3883 * 3DSTATE_SAMPLE_PATTERN during a context restore. 3884 */ 3885 batch = gen8_emit_pipe_control(batch, 3886 PIPE_CONTROL_CS_STALL, 3887 0); 3888 /* 3889 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for 3890 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in 3891 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is 3892 * confusing. Since gen8_emit_pipe_control() already advances the 3893 * batch by 6 dwords, we advance the other 10 here, completing a 3894 * cacheline. It's not clear if the workaround requires this padding 3895 * before other commands, or if it's just the regular padding we would 3896 * already have for the workaround bb, so leave it here for now.
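 *
 * In other words: the 6 dwords emitted by gen8_emit_pipe_control()
 * plus the 10 MI_NOOPs below make 16 dwords, i.e. 64 bytes -- one
 * full cacheline on platforms with 64-byte cachelines.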
3897 */ 3898 for (i = 0; i < 10; i++) 3899 *batch++ = MI_NOOP; 3900 3901 /* Pad to end of cacheline */ 3902 while ((unsigned long)batch % CACHELINE_BYTES) 3903 *batch++ = MI_NOOP; 3904 3905 return batch; 3906 } 3907 3908 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) 3909 3910 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) 3911 { 3912 struct drm_i915_gem_object *obj; 3913 struct i915_vma *vma; 3914 int err; 3915 3916 obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); 3917 if (IS_ERR(obj)) 3918 return PTR_ERR(obj); 3919 3920 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); 3921 if (IS_ERR(vma)) { 3922 err = PTR_ERR(vma); 3923 goto err; 3924 } 3925 3926 err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH); 3927 if (err) 3928 goto err; 3929 3930 engine->wa_ctx.vma = vma; 3931 return 0; 3932 3933 err: 3934 i915_gem_object_put(obj); 3935 return err; 3936 } 3937 3938 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) 3939 { 3940 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); 3941 } 3942 3943 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); 3944 3945 static int intel_init_workaround_bb(struct intel_engine_cs *engine) 3946 { 3947 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; 3948 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, 3949 &wa_ctx->per_ctx }; 3950 wa_bb_func_t wa_bb_fn[2]; 3951 void *batch, *batch_ptr; 3952 unsigned int i; 3953 int ret; 3954 3955 if (engine->class != RENDER_CLASS) 3956 return 0; 3957 3958 switch (INTEL_GEN(engine->i915)) { 3959 case 12: 3960 case 11: 3961 return 0; 3962 case 10: 3963 wa_bb_fn[0] = gen10_init_indirectctx_bb; 3964 wa_bb_fn[1] = NULL; 3965 break; 3966 case 9: 3967 wa_bb_fn[0] = gen9_init_indirectctx_bb; 3968 wa_bb_fn[1] = NULL; 3969 break; 3970 case 8: 3971 wa_bb_fn[0] = gen8_init_indirectctx_bb; 3972 wa_bb_fn[1] = NULL; 3973 break; 3974 default: 3975 MISSING_CASE(INTEL_GEN(engine->i915)); 3976 return 0; 3977 } 3978 3979 ret = lrc_setup_wa_ctx(engine); 3980 if (ret) { 3981 drm_dbg(&engine->i915->drm, 3982 "Failed to setup context WA page: %d\n", ret); 3983 return ret; 3984 } 3985 3986 batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB); if (IS_ERR(batch)) { /* don't write through an ERR_PTR if the map failed */ lrc_destroy_wa_ctx(engine); return PTR_ERR(batch); } 3987 3988 /* 3989 * Emit the two workaround batch buffers, recording the offset from the 3990 * start of the workaround batch buffer object for each and their 3991 * respective sizes. 3992 */ 3993 batch_ptr = batch; 3994 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { 3995 wa_bb[i]->offset = batch_ptr - batch; 3996 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, 3997 CACHELINE_BYTES))) { 3998 ret = -EINVAL; 3999 break; 4000 } 4001 if (wa_bb_fn[i]) 4002 batch_ptr = wa_bb_fn[i](engine, batch_ptr); 4003 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); 4004 } 4005 GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); 4006 4007 __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch); 4008 __i915_gem_object_release_map(wa_ctx->vma->obj); 4009 if (ret) 4010 lrc_destroy_wa_ctx(engine); 4011 4012 return ret; 4013 } 4014 4015 static void reset_csb_pointers(struct intel_engine_cs *engine) 4016 { 4017 struct intel_engine_execlists * const execlists = &engine->execlists; 4018 const unsigned int reset_value = execlists->csb_size - 1; 4019 4020 ring_set_paused(engine, 0); 4021 4022 /* 4023 * Sometimes Icelake forgets to reset its pointers on a GPU reset. 4024 * Bludgeon them with an mmio update to be sure.
4025 */ 4026 ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, 4027 0xffff << 16 | reset_value << 8 | reset_value); 4028 ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); 4029 4030 /* 4031 * After a reset, the HW starts writing into CSB entry [0]. We 4032 * therefore have to set our HEAD pointer back one entry so that 4033 * the *first* entry we check is entry 0. To complicate this further, 4034 * as we don't wait for the first interrupt after reset, we have to 4035 * fake the HW write to point back to the last entry so that our 4036 * inline comparison of our cached head position against the last HW 4037 * write works even before the first interrupt. 4038 */ 4039 execlists->csb_head = reset_value; 4040 WRITE_ONCE(*execlists->csb_write, reset_value); 4041 wmb(); /* Make sure this is visible to HW (paranoia?) */ 4042 4043 /* Check that the GPU does indeed update the CSB entries! */ 4044 memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64)); 4045 invalidate_csb_entries(&execlists->csb_status[0], 4046 &execlists->csb_status[reset_value]); 4047 4048 /* Once more for luck and our trusty paranoia */ 4049 ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, 4050 0xffff << 16 | reset_value << 8 | reset_value); 4051 ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); 4052 4053 GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value); 4054 } 4055 4056 static void execlists_sanitize(struct intel_engine_cs *engine) 4057 { 4058 /* 4059 * Poison residual state on resume, in case the suspend didn't! 4060 * 4061 * We have to assume that across suspend/resume (or other loss 4062 * of control) the contents of our pinned buffers have been 4063 * lost, replaced by garbage. Since this doesn't always happen, 4064 * let's poison such state so that we more quickly spot when 4065 * we falsely assume it has been preserved. 4066 */ 4067 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 4068 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); 4069 4070 reset_csb_pointers(engine); 4071 4072 /* 4073 * The kernel_context HWSP is stored in the status_page. As above, 4074 * that may be lost on resume/initialisation, and so we need to 4075 * reset the value in the HWSP. 4076 */ 4077 intel_timeline_reset_seqno(engine->kernel_context->timeline); 4078 4079 /* And scrub the dirty cachelines for the HWSP */ 4080 clflush_cache_range(engine->status_page.addr, PAGE_SIZE); 4081 } 4082 4083 static void enable_error_interrupt(struct intel_engine_cs *engine) 4084 { 4085 u32 status; 4086 4087 engine->execlists.error_interrupt = 0; 4088 ENGINE_WRITE(engine, RING_EMR, ~0u); 4089 ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */ 4090 4091 status = ENGINE_READ(engine, RING_ESR); 4092 if (unlikely(status)) { 4093 drm_err(&engine->i915->drm, 4094 "engine '%s' resumed still in error: %08x\n", 4095 engine->name, status); 4096 __intel_gt_reset(engine->gt, engine->mask); 4097 } 4098 4099 /* 4100 * On current gen8+, we have 2 signals to play with 4101 * 4102 * - I915_ERROR_INSTRUCTION (bit 0) 4103 * 4104 * Generate an error if the command parser encounters an invalid 4105 * instruction 4106 * 4107 * This is a fatal error. 4108 * 4109 * - CP_PRIV (bit 2) 4110 * 4111 * Generate an error on privilege violation (where the CP replaces 4112 * the instruction with a no-op). This also fires for writes into 4113 * read-only scratch pages. 4114 * 4115 * This is a non-fatal error, parsing continues.
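 *
 * For example, an unprivileged batch that tries to write a privileged
 * register has the offending write replaced with a MI_NOOP and CP_PRIV
 * raised, while the rest of the batch keeps executing.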
4116 * 4117 * * there are a few others defined for odd HW that we do not use 4118 * 4119 * Since CP_PRIV fires for cases where we have chosen to ignore the 4120 * error (as the HW is validating and suppressing the mistakes), we 4121 * only unmask the instruction error bit. 4122 */ 4123 ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION); 4124 } 4125 4126 static void enable_execlists(struct intel_engine_cs *engine) 4127 { 4128 u32 mode; 4129 4130 assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); 4131 4132 intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ 4133 4134 if (INTEL_GEN(engine->i915) >= 11) 4135 mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); 4136 else 4137 mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); 4138 ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); 4139 4140 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 4141 4142 ENGINE_WRITE_FW(engine, 4143 RING_HWS_PGA, 4144 i915_ggtt_offset(engine->status_page.vma)); 4145 ENGINE_POSTING_READ(engine, RING_HWS_PGA); 4146 4147 enable_error_interrupt(engine); 4148 4149 engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); 4150 } 4151 4152 static bool unexpected_starting_state(struct intel_engine_cs *engine) 4153 { 4154 bool unexpected = false; 4155 4156 if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { 4157 drm_dbg(&engine->i915->drm, 4158 "STOP_RING still set in RING_MI_MODE\n"); 4159 unexpected = true; 4160 } 4161 4162 return unexpected; 4163 } 4164 4165 static int execlists_resume(struct intel_engine_cs *engine) 4166 { 4167 intel_mocs_init_engine(engine); 4168 4169 intel_breadcrumbs_reset(engine->breadcrumbs); 4170 4171 if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { 4172 struct drm_printer p = drm_debug_printer(__func__); 4173 4174 intel_engine_dump(engine, &p, NULL); 4175 } 4176 4177 enable_execlists(engine); 4178 4179 return 0; 4180 } 4181 4182 static void execlists_reset_prepare(struct intel_engine_cs *engine) 4183 { 4184 struct intel_engine_execlists * const execlists = &engine->execlists; 4185 unsigned long flags; 4186 4187 ENGINE_TRACE(engine, "depth<-%d\n", 4188 atomic_read(&execlists->tasklet.count)); 4189 4190 /* 4191 * Prevent request submission to the hardware until we have 4192 * completed the reset in i915_gem_reset_finish(). If a request 4193 * is completed by one engine, it may then queue a request 4194 * to a second via its execlists->tasklet *just* as we are 4195 * calling engine->resume() and also writing the ELSP. 4196 * Turning off the execlists->tasklet until the reset is over 4197 * prevents the race. 4198 */ 4199 __tasklet_disable_sync_once(&execlists->tasklet); 4200 GEM_BUG_ON(!reset_in_progress(execlists)); 4201 4202 /* And flush any current direct submission. */ 4203 spin_lock_irqsave(&engine->active.lock, flags); 4204 spin_unlock_irqrestore(&engine->active.lock, flags); 4205 4206 /* 4207 * We stop engines, otherwise we might get failed reset and a 4208 * dead gpu (on elk). Also as modern gpu as kbl can suffer 4209 * from system hang if batchbuffer is progressing when 4210 * the reset is issued, regardless of READY_TO_RESET ack. 4211 * Thus assume it is best to stop engines on all gens 4212 * where we have a gpu reset. 
4213 * 4214 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) 4215 * 4216 * FIXME: Wa for more modern gens needs to be validated 4217 */ 4218 ring_set_paused(engine, 1); 4219 intel_engine_stop_cs(engine); 4220 4221 engine->execlists.reset_ccid = active_ccid(engine); 4222 } 4223 4224 static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) 4225 { 4226 int x; 4227 4228 x = lrc_ring_mi_mode(engine); 4229 if (x != -1) { 4230 regs[x + 1] &= ~STOP_RING; 4231 regs[x + 1] |= STOP_RING << 16; 4232 } 4233 } 4234 4235 static void __execlists_reset_reg_state(const struct intel_context *ce, 4236 const struct intel_engine_cs *engine) 4237 { 4238 u32 *regs = ce->lrc_reg_state; 4239 4240 __reset_stop_ring(regs, engine); 4241 } 4242 4243 static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) 4244 { 4245 struct intel_engine_execlists * const execlists = &engine->execlists; 4246 struct intel_context *ce; 4247 struct i915_request *rq; 4248 u32 head; 4249 4250 mb(); /* paranoia: read the CSB pointers from after the reset */ 4251 clflush(execlists->csb_write); 4252 mb(); 4253 4254 process_csb(engine); /* drain preemption events */ 4255 4256 /* Following the reset, we need to reload the CSB read/write pointers */ 4257 reset_csb_pointers(engine); 4258 4259 /* 4260 * Save the currently executing context, even if we completed 4261 * its request, it was still running at the time of the 4262 * reset and will have been clobbered. 4263 */ 4264 rq = active_context(engine, engine->execlists.reset_ccid); 4265 if (!rq) 4266 goto unwind; 4267 4268 ce = rq->context; 4269 GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); 4270 4271 if (i915_request_completed(rq)) { 4272 /* Idle context; tidy up the ring so we can restart afresh */ 4273 head = intel_ring_wrap(ce->ring, rq->tail); 4274 goto out_replay; 4275 } 4276 4277 /* We still have requests in-flight; the engine should be active */ 4278 GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); 4279 4280 /* Context has requests still in-flight; it should not be idle! */ 4281 GEM_BUG_ON(i915_active_is_idle(&ce->active)); 4282 4283 rq = active_request(ce->timeline, rq); 4284 head = intel_ring_wrap(ce->ring, rq->head); 4285 GEM_BUG_ON(head == ce->ring->tail); 4286 4287 /* 4288 * If this request hasn't started yet, e.g. it is waiting on a 4289 * semaphore, we need to avoid skipping the request or else we 4290 * break the signaling chain. However, if the context is corrupt 4291 * the request will not restart and we will be stuck with a wedged 4292 * device. It is quite often the case that if we issue a reset 4293 * while the GPU is loading the context image, that the context 4294 * image becomes corrupt. 4295 * 4296 * Otherwise, if we have not started yet, the request should replay 4297 * perfectly and we do not need to flag the result as being erroneous. 4298 */ 4299 if (!i915_request_started(rq)) 4300 goto out_replay; 4301 4302 /* 4303 * If the request was innocent, we leave the request in the ELSP 4304 * and will try to replay it on restarting. The context image may 4305 * have been corrupted by the reset, in which case we may have 4306 * to service a new GPU hang, but more likely we can continue on 4307 * without impact. 4308 * 4309 * If the request was guilty, we presume the context is corrupt 4310 * and have to at least restore the RING register in the context 4311 * image back to the expected values to skip over the guilty request. 
4312 */ 4313 __i915_request_reset(rq, stalled); 4314 4315 /* 4316 * We want a simple context + ring to execute the breadcrumb update. 4317 * We cannot rely on the context being intact across the GPU hang, 4318 * so clear it and rebuild just what we need for the breadcrumb. 4319 * All pending requests for this context will be zapped, and any 4320 * future request will be after userspace has had the opportunity 4321 * to recreate its own state. 4322 */ 4323 out_replay: 4324 ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", 4325 head, ce->ring->tail); 4326 __execlists_reset_reg_state(ce, engine); 4327 __execlists_update_reg_state(ce, engine, head); 4328 ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ 4329 4330 unwind: 4331 /* Push back any incomplete requests for replay after the reset. */ 4332 cancel_port_requests(execlists); 4333 __unwind_incomplete_requests(engine); 4334 } 4335 4336 static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) 4337 { 4338 unsigned long flags; 4339 4340 ENGINE_TRACE(engine, "\n"); 4341 4342 spin_lock_irqsave(&engine->active.lock, flags); 4343 4344 __execlists_reset(engine, stalled); 4345 4346 spin_unlock_irqrestore(&engine->active.lock, flags); 4347 } 4348 4349 static void nop_submission_tasklet(unsigned long data) 4350 { 4351 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; 4352 4353 /* The driver is wedged; don't process any more events. */ 4354 WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN); 4355 } 4356 4357 static void execlists_reset_cancel(struct intel_engine_cs *engine) 4358 { 4359 struct intel_engine_execlists * const execlists = &engine->execlists; 4360 struct i915_request *rq, *rn; 4361 struct rb_node *rb; 4362 unsigned long flags; 4363 4364 ENGINE_TRACE(engine, "\n"); 4365 4366 /* 4367 * Before we call engine->cancel_requests(), we should have exclusive 4368 * access to the submission state. This is arranged for us by the 4369 * caller disabling the interrupt generation, the tasklet and other 4370 * threads that may then access the same state, giving us a free hand 4371 * to reset state. However, we still need to let lockdep be aware that 4372 * we know this state may be accessed in hardirq context, so we 4373 * disable the irq around this manipulation and we want to keep 4374 * the spinlock focused on its duties and not accidentally conflate 4375 * coverage to the submission's irq state. (Similarly, although we 4376 * shouldn't need to disable irq around the manipulation of the 4377 * submission's irq state, we also wish to remind ourselves that 4378 * it is irq state.) 4379 */ 4380 spin_lock_irqsave(&engine->active.lock, flags); 4381 4382 __execlists_reset(engine, true); 4383 4384 /* Mark all executing requests as skipped. */ 4385 list_for_each_entry(rq, &engine->active.requests, sched.link) 4386 mark_eio(rq); 4387 4388 /* Flush the queued requests to the timeline list (for retiring). 
*/ 4389 while ((rb = rb_first_cached(&execlists->queue))) { 4390 struct i915_priolist *p = to_priolist(rb); 4391 int i; 4392 4393 priolist_for_each_request_consume(rq, rn, p, i) { 4394 mark_eio(rq); 4395 __i915_request_submit(rq); 4396 } 4397 4398 rb_erase_cached(&p->node, &execlists->queue); 4399 i915_priolist_free(p); 4400 } 4401 4402 /* On-hold requests will be flushed to timeline upon their release */ 4403 list_for_each_entry(rq, &engine->active.hold, sched.link) 4404 mark_eio(rq); 4405 4406 /* Cancel all attached virtual engines */ 4407 while ((rb = rb_first_cached(&execlists->virtual))) { 4408 struct virtual_engine *ve = 4409 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); 4410 4411 rb_erase_cached(rb, &execlists->virtual); 4412 RB_CLEAR_NODE(rb); 4413 4414 spin_lock(&ve->base.active.lock); 4415 rq = fetch_and_zero(&ve->request); 4416 if (rq) { 4417 mark_eio(rq); 4418 4419 rq->engine = engine; 4420 __i915_request_submit(rq); 4421 i915_request_put(rq); 4422 4423 ve->base.execlists.queue_priority_hint = INT_MIN; 4424 } 4425 spin_unlock(&ve->base.active.lock); 4426 } 4427 4428 /* Remaining _unready_ requests will be nop'ed when submitted */ 4429 4430 execlists->queue_priority_hint = INT_MIN; 4431 execlists->queue = RB_ROOT_CACHED; 4432 4433 GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); 4434 execlists->tasklet.func = nop_submission_tasklet; 4435 4436 spin_unlock_irqrestore(&engine->active.lock, flags); 4437 } 4438 4439 static void execlists_reset_finish(struct intel_engine_cs *engine) 4440 { 4441 struct intel_engine_execlists * const execlists = &engine->execlists; 4442 4443 /* 4444 * After a GPU reset, we may have requests to replay. Do so now while 4445 * we still have the forcewake to be sure that the GPU is not allowed 4446 * to sleep before we restart and reload a context. 4447 */ 4448 GEM_BUG_ON(!reset_in_progress(execlists)); 4449 if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) 4450 execlists->tasklet.func(execlists->tasklet.data); 4451 4452 if (__tasklet_enable(&execlists->tasklet)) 4453 /* And kick in case we missed a new request submission. */ 4454 tasklet_hi_schedule(&execlists->tasklet); 4455 ENGINE_TRACE(engine, "depth->%d\n", 4456 atomic_read(&execlists->tasklet.count)); 4457 } 4458 4459 static int gen8_emit_bb_start_noarb(struct i915_request *rq, 4460 u64 offset, u32 len, 4461 const unsigned int flags) 4462 { 4463 u32 *cs; 4464 4465 cs = intel_ring_begin(rq, 4); 4466 if (IS_ERR(cs)) 4467 return PTR_ERR(cs); 4468 4469 /* 4470 * WaDisableCtxRestoreArbitration:bdw,chv 4471 * 4472 * We don't need to perform MI_ARB_ENABLE as often as we do (in 4473 * particular all the gen that do not need the w/a at all!): if we 4474 * took care to make sure that on every switch into this context 4475 * (both ordinary and for preemption) arbitration was enabled, 4476 * we would be fine. However, for gen8 there is another w/a that 4477 * requires us to not preempt inside GPGPU execution, so we keep 4478 * arbitration disabled for gen8 batches. Arbitration will be 4479 * re-enabled before we close the request 4480 * (engine->emit_fini_breadcrumb). 4481 */ 4482 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 4483 4484 /* FIXME(BDW+): Address space and security selectors. */ 4485 *cs++ = MI_BATCH_BUFFER_START_GEN8 | 4486 (flags & I915_DISPATCH_SECURE ?
0 : BIT(8)); 4487 *cs++ = lower_32_bits(offset); 4488 *cs++ = upper_32_bits(offset); 4489 4490 intel_ring_advance(rq, cs); 4491 4492 return 0; 4493 } 4494 4495 static int gen8_emit_bb_start(struct i915_request *rq, 4496 u64 offset, u32 len, 4497 const unsigned int flags) 4498 { 4499 u32 *cs; 4500 4501 cs = intel_ring_begin(rq, 6); 4502 if (IS_ERR(cs)) 4503 return PTR_ERR(cs); 4504 4505 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 4506 4507 *cs++ = MI_BATCH_BUFFER_START_GEN8 | 4508 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); 4509 *cs++ = lower_32_bits(offset); 4510 *cs++ = upper_32_bits(offset); 4511 4512 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 4513 *cs++ = MI_NOOP; 4514 4515 intel_ring_advance(rq, cs); 4516 4517 return 0; 4518 } 4519 4520 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) 4521 { 4522 ENGINE_WRITE(engine, RING_IMR, 4523 ~(engine->irq_enable_mask | engine->irq_keep_mask)); 4524 ENGINE_POSTING_READ(engine, RING_IMR); 4525 } 4526 4527 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) 4528 { 4529 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); 4530 } 4531 4532 static int gen8_emit_flush(struct i915_request *request, u32 mode) 4533 { 4534 u32 cmd, *cs; 4535 4536 cs = intel_ring_begin(request, 4); 4537 if (IS_ERR(cs)) 4538 return PTR_ERR(cs); 4539 4540 cmd = MI_FLUSH_DW + 1; 4541 4542 /* We always require a command barrier so that subsequent 4543 * commands, such as breadcrumb interrupts, are strictly ordered 4544 * wrt the contents of the write cache being flushed to memory 4545 * (and thus being coherent from the CPU). 4546 */ 4547 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 4548 4549 if (mode & EMIT_INVALIDATE) { 4550 cmd |= MI_INVALIDATE_TLB; 4551 if (request->engine->class == VIDEO_DECODE_CLASS) 4552 cmd |= MI_INVALIDATE_BSD; 4553 } 4554 4555 *cs++ = cmd; 4556 *cs++ = LRC_PPHWSP_SCRATCH_ADDR; 4557 *cs++ = 0; /* upper addr */ 4558 *cs++ = 0; /* value */ 4559 intel_ring_advance(request, cs); 4560 4561 return 0; 4562 } 4563 4564 static int gen8_emit_flush_render(struct i915_request *request, 4565 u32 mode) 4566 { 4567 bool vf_flush_wa = false, dc_flush_wa = false; 4568 u32 *cs, flags = 0; 4569 int len; 4570 4571 flags |= PIPE_CONTROL_CS_STALL; 4572 4573 if (mode & EMIT_FLUSH) { 4574 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 4575 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 4576 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 4577 flags |= PIPE_CONTROL_FLUSH_ENABLE; 4578 } 4579 4580 if (mode & EMIT_INVALIDATE) { 4581 flags |= PIPE_CONTROL_TLB_INVALIDATE; 4582 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 4583 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 4584 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 4585 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 4586 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 4587 flags |= PIPE_CONTROL_QW_WRITE; 4588 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4589 4590 /* 4591 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 4592 * pipe control. 
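 * A NULL pipe control here is simply a PIPE_CONTROL with no flags set,
 * i.e. the gen8_emit_pipe_control(cs, 0, 0) emitted below whenever
 * vf_flush_wa is true.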
4593 */ 4594 if (IS_GEN(request->engine->i915, 9)) 4595 vf_flush_wa = true; 4596 4597 /* WaForGAMHang:kbl */ 4598 if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0)) 4599 dc_flush_wa = true; 4600 } 4601 4602 len = 6; 4603 4604 if (vf_flush_wa) 4605 len += 6; 4606 4607 if (dc_flush_wa) 4608 len += 12; 4609 4610 cs = intel_ring_begin(request, len); 4611 if (IS_ERR(cs)) 4612 return PTR_ERR(cs); 4613 4614 if (vf_flush_wa) 4615 cs = gen8_emit_pipe_control(cs, 0, 0); 4616 4617 if (dc_flush_wa) 4618 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, 4619 0); 4620 4621 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4622 4623 if (dc_flush_wa) 4624 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); 4625 4626 intel_ring_advance(request, cs); 4627 4628 return 0; 4629 } 4630 4631 static int gen11_emit_flush_render(struct i915_request *request, 4632 u32 mode) 4633 { 4634 if (mode & EMIT_FLUSH) { 4635 u32 *cs; 4636 u32 flags = 0; 4637 4638 flags |= PIPE_CONTROL_CS_STALL; 4639 4640 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; 4641 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 4642 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 4643 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 4644 flags |= PIPE_CONTROL_FLUSH_ENABLE; 4645 flags |= PIPE_CONTROL_QW_WRITE; 4646 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4647 4648 cs = intel_ring_begin(request, 6); 4649 if (IS_ERR(cs)) 4650 return PTR_ERR(cs); 4651 4652 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4653 intel_ring_advance(request, cs); 4654 } 4655 4656 if (mode & EMIT_INVALIDATE) { 4657 u32 *cs; 4658 u32 flags = 0; 4659 4660 flags |= PIPE_CONTROL_CS_STALL; 4661 4662 flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; 4663 flags |= PIPE_CONTROL_TLB_INVALIDATE; 4664 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 4665 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 4666 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 4667 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 4668 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 4669 flags |= PIPE_CONTROL_QW_WRITE; 4670 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4671 4672 cs = intel_ring_begin(request, 6); 4673 if (IS_ERR(cs)) 4674 return PTR_ERR(cs); 4675 4676 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4677 intel_ring_advance(request, cs); 4678 } 4679 4680 return 0; 4681 } 4682 4683 static u32 preparser_disable(bool state) 4684 { 4685 return MI_ARB_CHECK | 1 << 8 | state; 4686 } 4687 4688 static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine) 4689 { 4690 static const i915_reg_t vd[] = { 4691 GEN12_VD0_AUX_NV, 4692 GEN12_VD1_AUX_NV, 4693 GEN12_VD2_AUX_NV, 4694 GEN12_VD3_AUX_NV, 4695 }; 4696 4697 static const i915_reg_t ve[] = { 4698 GEN12_VE0_AUX_NV, 4699 GEN12_VE1_AUX_NV, 4700 }; 4701 4702 if (engine->class == VIDEO_DECODE_CLASS) 4703 return vd[engine->instance]; 4704 4705 if (engine->class == VIDEO_ENHANCEMENT_CLASS) 4706 return ve[engine->instance]; 4707 4708 GEM_BUG_ON("unknown aux_inv_reg\n"); 4709 4710 return INVALID_MMIO_REG; 4711 } 4712 4713 static u32 * 4714 gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs) 4715 { 4716 *cs++ = MI_LOAD_REGISTER_IMM(1); 4717 *cs++ = i915_mmio_reg_offset(inv_reg); 4718 *cs++ = AUX_INV; 4719 *cs++ = MI_NOOP; 4720 4721 return cs; 4722 } 4723 4724 static int gen12_emit_flush_render(struct i915_request *request, 4725 u32 mode) 4726 { 4727 if (mode & EMIT_FLUSH) { 4728 u32 flags = 0; 4729 u32 *cs; 4730 4731 flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; 4732 flags |= PIPE_CONTROL_FLUSH_L3; 4733 flags |= 
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 4734 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 4735 /* Wa_1409600907:tgl */ 4736 flags |= PIPE_CONTROL_DEPTH_STALL; 4737 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; 4738 flags |= PIPE_CONTROL_FLUSH_ENABLE; 4739 4740 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4741 flags |= PIPE_CONTROL_QW_WRITE; 4742 4743 flags |= PIPE_CONTROL_CS_STALL; 4744 4745 cs = intel_ring_begin(request, 6); 4746 if (IS_ERR(cs)) 4747 return PTR_ERR(cs); 4748 4749 cs = gen12_emit_pipe_control(cs, 4750 PIPE_CONTROL0_HDC_PIPELINE_FLUSH, 4751 flags, LRC_PPHWSP_SCRATCH_ADDR); 4752 intel_ring_advance(request, cs); 4753 } 4754 4755 if (mode & EMIT_INVALIDATE) { 4756 u32 flags = 0; 4757 u32 *cs; 4758 4759 flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; 4760 flags |= PIPE_CONTROL_TLB_INVALIDATE; 4761 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 4762 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 4763 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 4764 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 4765 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 4766 4767 flags |= PIPE_CONTROL_STORE_DATA_INDEX; 4768 flags |= PIPE_CONTROL_QW_WRITE; 4769 4770 flags |= PIPE_CONTROL_CS_STALL; 4771 4772 cs = intel_ring_begin(request, 8 + 4); 4773 if (IS_ERR(cs)) 4774 return PTR_ERR(cs); 4775 4776 /* 4777 * Prevent the pre-parser from skipping past the TLB 4778 * invalidate and loading a stale page for the batch 4779 * buffer / request payload. 4780 */ 4781 *cs++ = preparser_disable(true); 4782 4783 cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); 4784 4785 /* hsdes: 1809175790 */ 4786 cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs); 4787 4788 *cs++ = preparser_disable(false); 4789 intel_ring_advance(request, cs); 4790 } 4791 4792 return 0; 4793 } 4794 4795 static int gen12_emit_flush(struct i915_request *request, u32 mode) 4796 { 4797 intel_engine_mask_t aux_inv = 0; 4798 u32 cmd, *cs; 4799 4800 cmd = 4; 4801 if (mode & EMIT_INVALIDATE) 4802 cmd += 2; 4803 if (mode & EMIT_INVALIDATE) 4804 aux_inv = request->engine->mask & ~BIT(BCS0); 4805 if (aux_inv) 4806 cmd += 2 * hweight8(aux_inv) + 2; 4807 4808 cs = intel_ring_begin(request, cmd); 4809 if (IS_ERR(cs)) 4810 return PTR_ERR(cs); 4811 4812 if (mode & EMIT_INVALIDATE) 4813 *cs++ = preparser_disable(true); 4814 4815 cmd = MI_FLUSH_DW + 1; 4816 4817 /* We always require a command barrier so that subsequent 4818 * commands, such as breadcrumb interrupts, are strictly ordered 4819 * wrt the contents of the write cache being flushed to memory 4820 * (and thus being coherent from the CPU). 
4821 */ 4822 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 4823 4824 if (mode & EMIT_INVALIDATE) { 4825 cmd |= MI_INVALIDATE_TLB; 4826 if (request->engine->class == VIDEO_DECODE_CLASS) 4827 cmd |= MI_INVALIDATE_BSD; 4828 } 4829 4830 *cs++ = cmd; 4831 *cs++ = LRC_PPHWSP_SCRATCH_ADDR; 4832 *cs++ = 0; /* upper addr */ 4833 *cs++ = 0; /* value */ 4834 4835 if (aux_inv) { /* hsdes: 1809175790 */ 4836 struct intel_engine_cs *engine; 4837 unsigned int tmp; 4838 4839 *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv)); 4840 for_each_engine_masked(engine, request->engine->gt, 4841 aux_inv, tmp) { 4842 *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine)); 4843 *cs++ = AUX_INV; 4844 } 4845 *cs++ = MI_NOOP; 4846 } 4847 4848 if (mode & EMIT_INVALIDATE) 4849 *cs++ = preparser_disable(false); 4850 4851 intel_ring_advance(request, cs); 4852 4853 return 0; 4854 } 4855 4856 static void assert_request_valid(struct i915_request *rq) 4857 { 4858 struct intel_ring *ring __maybe_unused = rq->ring; 4859 4860 /* Can we unwind this request without appearing to go forwards? */ 4861 GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); 4862 } 4863 4864 /* 4865 * Reserve space for 2 NOOPs at the end of each request to be 4866 * used as a workaround for not being allowed to do lite 4867 * restore with HEAD==TAIL (WaIdleLiteRestore). 4868 */ 4869 static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) 4870 { 4871 /* Ensure there's always at least one preemption point per-request. */ 4872 *cs++ = MI_ARB_CHECK; 4873 *cs++ = MI_NOOP; 4874 request->wa_tail = intel_ring_offset(request, cs); 4875 4876 /* Check that entire request is less than half the ring */ 4877 assert_request_valid(request); 4878 4879 return cs; 4880 } 4881 4882 static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) 4883 { 4884 *cs++ = MI_SEMAPHORE_WAIT | 4885 MI_SEMAPHORE_GLOBAL_GTT | 4886 MI_SEMAPHORE_POLL | 4887 MI_SEMAPHORE_SAD_EQ_SDD; 4888 *cs++ = 0; 4889 *cs++ = intel_hws_preempt_address(request->engine); 4890 *cs++ = 0; 4891 4892 return cs; 4893 } 4894 4895 static __always_inline u32* 4896 gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) 4897 { 4898 *cs++ = MI_USER_INTERRUPT; 4899 4900 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 4901 if (intel_engine_has_semaphores(request->engine)) 4902 cs = emit_preempt_busywait(request, cs); 4903 4904 request->tail = intel_ring_offset(request, cs); 4905 assert_ring_tail_valid(request->ring, request->tail); 4906 4907 return gen8_emit_wa_tail(request, cs); 4908 } 4909 4910 static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs) 4911 { 4912 u32 addr = i915_request_active_timeline(request)->hwsp_offset; 4913 4914 return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0); 4915 } 4916 4917 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) 4918 { 4919 return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); 4920 } 4921 4922 static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 4923 { 4924 cs = gen8_emit_pipe_control(cs, 4925 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 4926 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 4927 PIPE_CONTROL_DC_FLUSH_ENABLE, 4928 0); 4929 4930 /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ 4931 cs = gen8_emit_ggtt_write_rcs(cs, 4932 request->fence.seqno, 4933 i915_request_active_timeline(request)->hwsp_offset, 4934 PIPE_CONTROL_FLUSH_ENABLE | 4935 PIPE_CONTROL_CS_STALL); 4936 4937 return gen8_emit_fini_breadcrumb_tail(request, cs); 4938 
} 4939 4940 static u32 * 4941 gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 4942 { 4943 cs = gen8_emit_ggtt_write_rcs(cs, 4944 request->fence.seqno, 4945 i915_request_active_timeline(request)->hwsp_offset, 4946 PIPE_CONTROL_CS_STALL | 4947 PIPE_CONTROL_TILE_CACHE_FLUSH | 4948 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 4949 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 4950 PIPE_CONTROL_DC_FLUSH_ENABLE | 4951 PIPE_CONTROL_FLUSH_ENABLE); 4952 4953 return gen8_emit_fini_breadcrumb_tail(request, cs); 4954 } 4955 4956 /* 4957 * Note that the CS instruction pre-parser will not stall on the breadcrumb 4958 * flush and will continue pre-fetching the instructions after it before the 4959 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at 4960 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble 4961 * of the next request before the memory has been flushed, we're guaranteed that 4962 * we won't access the batch itself too early. 4963 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, 4964 * so, if the current request is modifying an instruction in the next request on 4965 * the same intel_context, we might pre-fetch and then execute the pre-update 4966 * instruction. To avoid this, the users of self-modifying code should either 4967 * disable the parser around the code emitting the memory writes, via a new flag 4968 * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For 4969 * the in-kernel use-cases we've opted to use a separate context, see 4970 * reloc_gpu() as an example. 4971 * All the above applies only to the instructions themselves. Non-inline data 4972 * used by the instructions is not pre-fetched. 4973 */ 4974 4975 static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs) 4976 { 4977 *cs++ = MI_SEMAPHORE_WAIT_TOKEN | 4978 MI_SEMAPHORE_GLOBAL_GTT | 4979 MI_SEMAPHORE_POLL | 4980 MI_SEMAPHORE_SAD_EQ_SDD; 4981 *cs++ = 0; 4982 *cs++ = intel_hws_preempt_address(request->engine); 4983 *cs++ = 0; 4984 *cs++ = 0; 4985 *cs++ = MI_NOOP; 4986 4987 return cs; 4988 } 4989 4990 static __always_inline u32* 4991 gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) 4992 { 4993 *cs++ = MI_USER_INTERRUPT; 4994 4995 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 4996 if (intel_engine_has_semaphores(request->engine)) 4997 cs = gen12_emit_preempt_busywait(request, cs); 4998 4999 request->tail = intel_ring_offset(request, cs); 5000 assert_ring_tail_valid(request->ring, request->tail); 5001 5002 return gen8_emit_wa_tail(request, cs); 5003 } 5004 5005 static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) 5006 { 5007 return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); 5008 } 5009 5010 static u32 * 5011 gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) 5012 { 5013 cs = gen12_emit_ggtt_write_rcs(cs, 5014 request->fence.seqno, 5015 i915_request_active_timeline(request)->hwsp_offset, 5016 PIPE_CONTROL0_HDC_PIPELINE_FLUSH, 5017 PIPE_CONTROL_CS_STALL | 5018 PIPE_CONTROL_TILE_CACHE_FLUSH | 5019 PIPE_CONTROL_FLUSH_L3 | 5020 PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 5021 PIPE_CONTROL_DEPTH_CACHE_FLUSH | 5022 /* Wa_1409600907:tgl */ 5023 PIPE_CONTROL_DEPTH_STALL | 5024 PIPE_CONTROL_DC_FLUSH_ENABLE | 5025 PIPE_CONTROL_FLUSH_ENABLE); 5026 5027 return gen12_emit_fini_breadcrumb_tail(request, cs); 5028 } 5029 5030 static void execlists_park(struct intel_engine_cs *engine) 5031 { 5032 cancel_timer(&engine->execlists.timer); 5033 
cancel_timer(&engine->execlists.preempt); 5034 } 5035 5036 void intel_execlists_set_default_submission(struct intel_engine_cs *engine) 5037 { 5038 engine->submit_request = execlists_submit_request; 5039 engine->schedule = i915_schedule; 5040 engine->execlists.tasklet.func = execlists_submission_tasklet; 5041 5042 engine->reset.prepare = execlists_reset_prepare; 5043 engine->reset.rewind = execlists_reset_rewind; 5044 engine->reset.cancel = execlists_reset_cancel; 5045 engine->reset.finish = execlists_reset_finish; 5046 5047 engine->park = execlists_park; 5048 engine->unpark = NULL; 5049 5050 engine->flags |= I915_ENGINE_SUPPORTS_STATS; 5051 if (!intel_vgpu_active(engine->i915)) { 5052 engine->flags |= I915_ENGINE_HAS_SEMAPHORES; 5053 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { 5054 engine->flags |= I915_ENGINE_HAS_PREEMPTION; 5055 if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) 5056 engine->flags |= I915_ENGINE_HAS_TIMESLICES; 5057 } 5058 } 5059 5060 if (INTEL_GEN(engine->i915) >= 12) 5061 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; 5062 5063 if (intel_engine_has_preemption(engine)) 5064 engine->emit_bb_start = gen8_emit_bb_start; 5065 else 5066 engine->emit_bb_start = gen8_emit_bb_start_noarb; 5067 } 5068 5069 static void execlists_shutdown(struct intel_engine_cs *engine) 5070 { 5071 /* Synchronise with residual timers and any softirq they raise */ 5072 del_timer_sync(&engine->execlists.timer); 5073 del_timer_sync(&engine->execlists.preempt); 5074 tasklet_kill(&engine->execlists.tasklet); 5075 } 5076 5077 static void execlists_release(struct intel_engine_cs *engine) 5078 { 5079 engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ 5080 5081 execlists_shutdown(engine); 5082 5083 intel_engine_cleanup_common(engine); 5084 lrc_destroy_wa_ctx(engine); 5085 } 5086 5087 static void 5088 logical_ring_default_vfuncs(struct intel_engine_cs *engine) 5089 { 5090 /* Default vfuncs which can be overridden by each engine. */ 5091 5092 engine->resume = execlists_resume; 5093 5094 engine->cops = &execlists_context_ops; 5095 engine->request_alloc = execlists_request_alloc; 5096 5097 engine->emit_flush = gen8_emit_flush; 5098 engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; 5099 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; 5100 if (INTEL_GEN(engine->i915) >= 12) { 5101 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb; 5102 engine->emit_flush = gen12_emit_flush; 5103 } 5104 engine->set_default_submission = intel_execlists_set_default_submission; 5105 5106 if (INTEL_GEN(engine->i915) < 11) { 5107 engine->irq_enable = gen8_logical_ring_enable_irq; 5108 engine->irq_disable = gen8_logical_ring_disable_irq; 5109 } else { 5110 /* 5111 * TODO: On Gen11 interrupt masks need to be clear 5112 * to allow C6 entry. Keep interrupts enabled at all times 5113 * and take the hit of generating extra interrupts 5114 * until a more refined solution exists.
5115 */ 5116 } 5117 } 5118 5119 static inline void 5120 logical_ring_default_irqs(struct intel_engine_cs *engine) 5121 { 5122 unsigned int shift = 0; 5123 5124 if (INTEL_GEN(engine->i915) < 11) { 5125 const u8 irq_shifts[] = { 5126 [RCS0] = GEN8_RCS_IRQ_SHIFT, 5127 [BCS0] = GEN8_BCS_IRQ_SHIFT, 5128 [VCS0] = GEN8_VCS0_IRQ_SHIFT, 5129 [VCS1] = GEN8_VCS1_IRQ_SHIFT, 5130 [VECS0] = GEN8_VECS_IRQ_SHIFT, 5131 }; 5132 5133 shift = irq_shifts[engine->id]; 5134 } 5135 5136 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; 5137 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; 5138 engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; 5139 engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; 5140 } 5141 5142 static void rcs_submission_override(struct intel_engine_cs *engine) 5143 { 5144 switch (INTEL_GEN(engine->i915)) { 5145 case 12: 5146 engine->emit_flush = gen12_emit_flush_render; 5147 engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; 5148 break; 5149 case 11: 5150 engine->emit_flush = gen11_emit_flush_render; 5151 engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; 5152 break; 5153 default: 5154 engine->emit_flush = gen8_emit_flush_render; 5155 engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; 5156 break; 5157 } 5158 } 5159 5160 int intel_execlists_submission_setup(struct intel_engine_cs *engine) 5161 { 5162 struct intel_engine_execlists * const execlists = &engine->execlists; 5163 struct drm_i915_private *i915 = engine->i915; 5164 struct intel_uncore *uncore = engine->uncore; 5165 u32 base = engine->mmio_base; 5166 5167 tasklet_init(&engine->execlists.tasklet, 5168 execlists_submission_tasklet, (unsigned long)engine); 5169 timer_setup(&engine->execlists.timer, execlists_timeslice, 0); 5170 timer_setup(&engine->execlists.preempt, execlists_preempt, 0); 5171 5172 logical_ring_default_vfuncs(engine); 5173 logical_ring_default_irqs(engine); 5174 5175 if (engine->class == RENDER_CLASS) 5176 rcs_submission_override(engine); 5177 5178 if (intel_init_workaround_bb(engine)) 5179 /* 5180 * We continue even if we fail to initialize the WA batch 5181 * because we only expect rare glitches, and nothing 5182 * critical enough to prevent us from using the GPU. 5183 */ 5184 drm_err(&i915->drm, "WA batch buffer initialization failed\n"); 5185 5186 if (HAS_LOGICAL_RING_ELSQ(i915)) { 5187 execlists->submit_reg = uncore->regs + 5188 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); 5189 execlists->ctrl_reg = uncore->regs + 5190 i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); 5191 } else { 5192 execlists->submit_reg = uncore->regs + 5193 i915_mmio_reg_offset(RING_ELSP(base)); 5194 } 5195 5196 execlists->csb_status = 5197 (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 5198 5199 execlists->csb_write = 5200 &engine->status_page.addr[intel_hws_csb_write_index(i915)]; 5201 5202 if (INTEL_GEN(i915) < 11) 5203 execlists->csb_size = GEN8_CSB_ENTRIES; 5204 else 5205 execlists->csb_size = GEN11_CSB_ENTRIES; 5206 5207 if (INTEL_GEN(engine->i915) >= 11) { 5208 execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); 5209 execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); 5210 } 5211 5212 /* Finally, take ownership and responsibility for cleanup!
*/ 5213 engine->sanitize = execlists_sanitize; 5214 engine->release = execlists_release; 5215 5216 return 0; 5217 } 5218 5219 static void init_common_reg_state(u32 * const regs, 5220 const struct intel_engine_cs *engine, 5221 const struct intel_ring *ring, 5222 bool inhibit) 5223 { 5224 u32 ctl; 5225 5226 ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); 5227 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 5228 if (inhibit) 5229 ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; 5230 if (INTEL_GEN(engine->i915) < 11) 5231 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | 5232 CTX_CTRL_RS_CTX_ENABLE); 5233 regs[CTX_CONTEXT_CONTROL] = ctl; 5234 5235 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; 5236 regs[CTX_TIMESTAMP] = 0; 5237 } 5238 5239 static void init_wa_bb_reg_state(u32 * const regs, 5240 const struct intel_engine_cs *engine) 5241 { 5242 const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; 5243 5244 if (wa_ctx->per_ctx.size) { 5245 const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); 5246 5247 GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1); 5248 regs[lrc_ring_wa_bb_per_ctx(engine) + 1] = 5249 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; 5250 } 5251 5252 if (wa_ctx->indirect_ctx.size) { 5253 lrc_ring_setup_indirect_ctx(regs, engine, 5254 i915_ggtt_offset(wa_ctx->vma) + 5255 wa_ctx->indirect_ctx.offset, 5256 wa_ctx->indirect_ctx.size); 5257 } 5258 } 5259 5260 static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt) 5261 { 5262 if (i915_vm_is_4lvl(&ppgtt->vm)) { 5263 /* 64b PPGTT (48bit canonical) 5264 * PDP0_DESCRIPTOR contains the base address to PML4 and 5265 * other PDP Descriptors are ignored. 5266 */ 5267 ASSIGN_CTX_PML4(ppgtt, regs); 5268 } else { 5269 ASSIGN_CTX_PDP(ppgtt, regs, 3); 5270 ASSIGN_CTX_PDP(ppgtt, regs, 2); 5271 ASSIGN_CTX_PDP(ppgtt, regs, 1); 5272 ASSIGN_CTX_PDP(ppgtt, regs, 0); 5273 } 5274 } 5275 5276 static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) 5277 { 5278 if (i915_is_ggtt(vm)) 5279 return i915_vm_to_ggtt(vm)->alias; 5280 else 5281 return i915_vm_to_ppgtt(vm); 5282 } 5283 5284 static void execlists_init_reg_state(u32 *regs, 5285 const struct intel_context *ce, 5286 const struct intel_engine_cs *engine, 5287 const struct intel_ring *ring, 5288 bool inhibit) 5289 { 5290 /* 5291 * A context is actually a big batch buffer with several 5292 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The 5293 * values we are setting here are only for the first context restore: 5294 * on a subsequent save, the GPU will recreate this batchbuffer with new 5295 * values (including all the missing MI_LOAD_REGISTER_IMM commands that 5296 * we are not initializing here). 5297 * 5298 * Must keep consistent with virtual_update_register_offsets(). 
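 *
 * For example, after set_offsets() below the image holds (reg, value)
 * pairs such as the one for RING_CTL, whose value slot is then filled
 * in by init_common_reg_state() as
 * regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID.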
5299 */ 5300 set_offsets(regs, reg_offsets(engine), engine, inhibit); 5301 5302 init_common_reg_state(regs, engine, ring, inhibit); 5303 init_ppgtt_reg_state(regs, vm_alias(ce->vm)); 5304 5305 init_wa_bb_reg_state(regs, engine); 5306 5307 __reset_stop_ring(regs, engine); 5308 } 5309 5310 static int 5311 populate_lr_context(struct intel_context *ce, 5312 struct drm_i915_gem_object *ctx_obj, 5313 struct intel_engine_cs *engine, 5314 struct intel_ring *ring) 5315 { 5316 bool inhibit = true; 5317 void *vaddr; 5318 5319 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); 5320 if (IS_ERR(vaddr)) { 5321 drm_dbg(&engine->i915->drm, "Could not map object pages!\n"); 5322 return PTR_ERR(vaddr); 5323 } 5324 5325 set_redzone(vaddr, engine); 5326 5327 if (engine->default_state) { 5328 shmem_read(engine->default_state, 0, 5329 vaddr, engine->context_size); 5330 __set_bit(CONTEXT_VALID_BIT, &ce->flags); 5331 inhibit = false; 5332 } 5333 5334 /* Clear the ppHWSP (inc. per-context counters) */ 5335 memset(vaddr, 0, PAGE_SIZE); 5336 5337 /* 5338 * The second page of the context object contains some registers which 5339 * must be set up prior to the first execution. 5340 */ 5341 execlists_init_reg_state(vaddr + LRC_STATE_OFFSET, 5342 ce, engine, ring, inhibit); 5343 5344 __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); 5345 i915_gem_object_unpin_map(ctx_obj); 5346 return 0; 5347 } 5348 5349 static struct intel_timeline *pinned_timeline(struct intel_context *ce) 5350 { 5351 struct intel_timeline *tl = fetch_and_zero(&ce->timeline); 5352 5353 return intel_timeline_create_from_engine(ce->engine, 5354 page_unmask_bits(tl)); 5355 } 5356 5357 static int __execlists_context_alloc(struct intel_context *ce, 5358 struct intel_engine_cs *engine) 5359 { 5360 struct drm_i915_gem_object *ctx_obj; 5361 struct intel_ring *ring; 5362 struct i915_vma *vma; 5363 u32 context_size; 5364 int ret; 5365 5366 GEM_BUG_ON(ce->state); 5367 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); 5368 5369 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) 5370 context_size += I915_GTT_PAGE_SIZE; /* for redzone */ 5371 5372 if (INTEL_GEN(engine->i915) == 12) { 5373 ce->wa_bb_page = context_size / PAGE_SIZE; 5374 context_size += PAGE_SIZE; 5375 } 5376 5377 ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); 5378 if (IS_ERR(ctx_obj)) 5379 return PTR_ERR(ctx_obj); 5380 5381 vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); 5382 if (IS_ERR(vma)) { 5383 ret = PTR_ERR(vma); 5384 goto error_deref_obj; 5385 } 5386 5387 if (!page_mask_bits(ce->timeline)) { 5388 struct intel_timeline *tl; 5389 5390 /* 5391 * Use the static global HWSP for the kernel context, and 5392 * a dynamically allocated cacheline for everyone else. 
5393 */ 5394 if (unlikely(ce->timeline)) 5395 tl = pinned_timeline(ce); 5396 else 5397 tl = intel_timeline_create(engine->gt); 5398 if (IS_ERR(tl)) { 5399 ret = PTR_ERR(tl); 5400 goto error_deref_obj; 5401 } 5402 5403 ce->timeline = tl; 5404 } 5405 5406 ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); 5407 if (IS_ERR(ring)) { 5408 ret = PTR_ERR(ring); 5409 goto error_deref_obj; 5410 } 5411 5412 ret = populate_lr_context(ce, ctx_obj, engine, ring); 5413 if (ret) { 5414 drm_dbg(&engine->i915->drm, 5415 "Failed to populate LRC: %d\n", ret); 5416 goto error_ring_free; 5417 } 5418 5419 ce->ring = ring; 5420 ce->state = vma; 5421 5422 return 0; 5423 5424 error_ring_free: 5425 intel_ring_put(ring); 5426 error_deref_obj: 5427 i915_gem_object_put(ctx_obj); 5428 return ret; 5429 } 5430 5431 static struct list_head *virtual_queue(struct virtual_engine *ve) 5432 { 5433 return &ve->base.execlists.default_priolist.requests[0]; 5434 } 5435 5436 static void virtual_context_destroy(struct kref *kref) 5437 { 5438 struct virtual_engine *ve = 5439 container_of(kref, typeof(*ve), context.ref); 5440 unsigned int n; 5441 5442 GEM_BUG_ON(!list_empty(virtual_queue(ve))); 5443 GEM_BUG_ON(ve->request); 5444 GEM_BUG_ON(ve->context.inflight); 5445 5446 for (n = 0; n < ve->num_siblings; n++) { 5447 struct intel_engine_cs *sibling = ve->siblings[n]; 5448 struct rb_node *node = &ve->nodes[sibling->id].rb; 5449 unsigned long flags; 5450 5451 if (RB_EMPTY_NODE(node)) 5452 continue; 5453 5454 spin_lock_irqsave(&sibling->active.lock, flags); 5455 5456 /* Detachment is lazily performed in the execlists tasklet */ 5457 if (!RB_EMPTY_NODE(node)) 5458 rb_erase_cached(node, &sibling->execlists.virtual); 5459 5460 spin_unlock_irqrestore(&sibling->active.lock, flags); 5461 } 5462 GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); 5463 5464 if (ve->context.state) 5465 __execlists_context_fini(&ve->context); 5466 intel_context_fini(&ve->context); 5467 5468 intel_engine_free_request_pool(&ve->base); 5469 5470 kfree(ve->bonds); 5471 kfree(ve); 5472 } 5473 5474 static void virtual_engine_initial_hint(struct virtual_engine *ve) 5475 { 5476 int swp; 5477 5478 /* 5479 * Pick a random sibling on starting to help spread the load around. 5480 * 5481 * New contexts are typically created with exactly the same order 5482 * of siblings, and often started in batches. Due to the way we iterate 5483 * the array of sibling when submitting requests, sibling[0] is 5484 * prioritised for dequeuing. If we make sure that sibling[0] is fairly 5485 * randomised across the system, we also help spread the load by the 5486 * first engine we inspect being different each time. 5487 * 5488 * NB This does not force us to execute on this engine, it will just 5489 * typically be the first we inspect for submission. 
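 *
 * E.g. with two siblings, prandom_u32_max(2) picks 0 or 1 with equal
 * probability; when it picks 1 we swap siblings[0] and siblings[1],
 * so either engine is equally likely to be inspected first.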
5490 */ 5491 swp = prandom_u32_max(ve->num_siblings); 5492 if (swp) 5493 swap(ve->siblings[swp], ve->siblings[0]); 5494 } 5495 5496 static int virtual_context_alloc(struct intel_context *ce) 5497 { 5498 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5499 5500 return __execlists_context_alloc(ce, ve->siblings[0]); 5501 } 5502 5503 static int virtual_context_pin(struct intel_context *ce, void *vaddr) 5504 { 5505 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5506 5507 /* Note: we must use a real engine class for setting up reg state */ 5508 return __execlists_context_pin(ce, ve->siblings[0], vaddr); 5509 } 5510 5511 static void virtual_context_enter(struct intel_context *ce) 5512 { 5513 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5514 unsigned int n; 5515 5516 for (n = 0; n < ve->num_siblings; n++) 5517 intel_engine_pm_get(ve->siblings[n]); 5518 5519 intel_timeline_enter(ce->timeline); 5520 } 5521 5522 static void virtual_context_exit(struct intel_context *ce) 5523 { 5524 struct virtual_engine *ve = container_of(ce, typeof(*ve), context); 5525 unsigned int n; 5526 5527 intel_timeline_exit(ce->timeline); 5528 5529 for (n = 0; n < ve->num_siblings; n++) 5530 intel_engine_pm_put(ve->siblings[n]); 5531 } 5532 5533 static const struct intel_context_ops virtual_context_ops = { 5534 .alloc = virtual_context_alloc, 5535 5536 .pre_pin = execlists_context_pre_pin, 5537 .pin = virtual_context_pin, 5538 .unpin = execlists_context_unpin, 5539 .post_unpin = execlists_context_post_unpin, 5540 5541 .enter = virtual_context_enter, 5542 .exit = virtual_context_exit, 5543 5544 .destroy = virtual_context_destroy, 5545 }; 5546 5547 static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) 5548 { 5549 struct i915_request *rq; 5550 intel_engine_mask_t mask; 5551 5552 rq = READ_ONCE(ve->request); 5553 if (!rq) 5554 return 0; 5555 5556 /* The rq is ready for submission; rq->execution_mask is now stable. 
*/ 5557 mask = rq->execution_mask; 5558 if (unlikely(!mask)) { 5559 /* Invalid selection, submit to a random engine in error */ 5560 i915_request_set_error_once(rq, -ENODEV); 5561 mask = ve->siblings[0]->mask; 5562 } 5563 5564 ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n", 5565 rq->fence.context, rq->fence.seqno, 5566 mask, ve->base.execlists.queue_priority_hint); 5567 5568 return mask; 5569 } 5570 5571 static void virtual_submission_tasklet(unsigned long data) 5572 { 5573 struct virtual_engine * const ve = (struct virtual_engine *)data; 5574 const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint); 5575 intel_engine_mask_t mask; 5576 unsigned int n; 5577 5578 rcu_read_lock(); 5579 mask = virtual_submission_mask(ve); 5580 rcu_read_unlock(); 5581 if (unlikely(!mask)) 5582 return; 5583 5584 local_irq_disable(); 5585 for (n = 0; n < ve->num_siblings; n++) { 5586 struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); 5587 struct ve_node * const node = &ve->nodes[sibling->id]; 5588 struct rb_node **parent, *rb; 5589 bool first; 5590 5591 if (!READ_ONCE(ve->request)) 5592 break; /* already handled by a sibling's tasklet */ 5593 5594 if (unlikely(!(mask & sibling->mask))) { 5595 if (!RB_EMPTY_NODE(&node->rb)) { 5596 spin_lock(&sibling->active.lock); 5597 rb_erase_cached(&node->rb, 5598 &sibling->execlists.virtual); 5599 RB_CLEAR_NODE(&node->rb); 5600 spin_unlock(&sibling->active.lock); 5601 } 5602 continue; 5603 } 5604 5605 spin_lock(&sibling->active.lock); 5606 5607 if (!RB_EMPTY_NODE(&node->rb)) { 5608 /* 5609 * Cheat and avoid rebalancing the tree if we can 5610 * reuse this node in situ. 5611 */ 5612 first = rb_first_cached(&sibling->execlists.virtual) == 5613 &node->rb; 5614 if (prio == node->prio || (prio > node->prio && first)) 5615 goto submit_engine; 5616 5617 rb_erase_cached(&node->rb, &sibling->execlists.virtual); 5618 } 5619 5620 rb = NULL; 5621 first = true; 5622 parent = &sibling->execlists.virtual.rb_root.rb_node; 5623 while (*parent) { 5624 struct ve_node *other; 5625 5626 rb = *parent; 5627 other = rb_entry(rb, typeof(*other), rb); 5628 if (prio > other->prio) { 5629 parent = &rb->rb_left; 5630 } else { 5631 parent = &rb->rb_right; 5632 first = false; 5633 } 5634 } 5635 5636 rb_link_node(&node->rb, rb, parent); 5637 rb_insert_color_cached(&node->rb, 5638 &sibling->execlists.virtual, 5639 first); 5640 5641 submit_engine: 5642 GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); 5643 node->prio = prio; 5644 if (first && prio > sibling->execlists.queue_priority_hint) 5645 tasklet_hi_schedule(&sibling->execlists.tasklet); 5646 5647 spin_unlock(&sibling->active.lock); 5648 } 5649 local_irq_enable(); 5650 } 5651 5652 static void virtual_submit_request(struct i915_request *rq) 5653 { 5654 struct virtual_engine *ve = to_virtual_engine(rq->engine); 5655 struct i915_request *old; 5656 unsigned long flags; 5657 5658 ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n", 5659 rq->fence.context, 5660 rq->fence.seqno); 5661 5662 GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); 5663 5664 spin_lock_irqsave(&ve->base.active.lock, flags); 5665 5666 old = ve->request; 5667 if (old) { /* background completion event from preempt-to-busy */ 5668 GEM_BUG_ON(!i915_request_completed(old)); 5669 __i915_request_submit(old); 5670 i915_request_put(old); 5671 } 5672 5673 if (i915_request_completed(rq)) { 5674 __i915_request_submit(rq); 5675 5676 ve->base.execlists.queue_priority_hint = INT_MIN; 5677 ve->request = NULL; 5678 } else { 5679 ve->base.execlists.queue_priority_hint = rq_prio(rq); 
		ve->request = i915_request_get(rq);

		GEM_BUG_ON(!list_empty(virtual_queue(ve)));
		list_move_tail(&rq->sched.link, virtual_queue(ve));

		tasklet_hi_schedule(&ve->base.execlists.tasklet);
	}

	spin_unlock_irqrestore(&ve->base.active.lock, flags);
}

static struct ve_bond *
virtual_find_bond(struct virtual_engine *ve,
		  const struct intel_engine_cs *master)
{
	int i;

	for (i = 0; i < ve->num_bonds; i++) {
		if (ve->bonds[i].master == master)
			return &ve->bonds[i];
	}

	return NULL;
}

static void
virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
{
	struct virtual_engine *ve = to_virtual_engine(rq->engine);
	intel_engine_mask_t allowed, exec;
	struct ve_bond *bond;

	allowed = ~to_request(signal)->engine->mask;

	bond = virtual_find_bond(ve, to_request(signal)->engine);
	if (bond)
		allowed &= bond->sibling_mask;

	/* Restrict the bonded request to run on only the available engines */
	exec = READ_ONCE(rq->execution_mask);
	while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
		;

	/* Prevent the master from being re-run on the bonded engines */
	to_request(signal)->execution_mask &= ~allowed;
}

struct intel_context *
intel_execlists_create_virtual(struct intel_engine_cs **siblings,
			       unsigned int count)
{
	struct virtual_engine *ve;
	unsigned int n;
	int err;

	if (count == 0)
		return ERR_PTR(-EINVAL);

	if (count == 1)
		return intel_context_create(siblings[0]);

	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
	if (!ve)
		return ERR_PTR(-ENOMEM);

	ve->base.i915 = siblings[0]->i915;
	ve->base.gt = siblings[0]->gt;
	ve->base.uncore = siblings[0]->uncore;
	ve->base.id = -1;

	ve->base.class = OTHER_CLASS;
	ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
	ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
	ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;

	/*
	 * The decision on whether to submit a request using semaphores
	 * depends on the saturated state of the engine. We only compute
	 * this during HW submission of the request, and we need this
	 * state to be applied globally to all requests being submitted
	 * to this engine. Virtual engines encompass more than one physical
	 * engine and so we cannot accurately tell in advance if one of those
	 * engines is already saturated and so cannot afford to use a semaphore
	 * and be pessimized in priority for doing so -- if we are the only
	 * context using semaphores after all other clients have stopped, we
	 * will be starved on the saturated system. Such a global switch for
	 * semaphores is less than ideal, but alas is the current compromise.
	 */
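	/*
	 * Hence the global switch below: treat every sibling as already
	 * saturated, biasing requests on this engine away from using
	 * semaphores at all.
	 */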
5767 */ 5768 ve->base.saturated = ALL_ENGINES; 5769 5770 snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); 5771 5772 intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); 5773 intel_engine_init_execlists(&ve->base); 5774 5775 ve->base.cops = &virtual_context_ops; 5776 ve->base.request_alloc = execlists_request_alloc; 5777 5778 ve->base.schedule = i915_schedule; 5779 ve->base.submit_request = virtual_submit_request; 5780 ve->base.bond_execute = virtual_bond_execute; 5781 5782 INIT_LIST_HEAD(virtual_queue(ve)); 5783 ve->base.execlists.queue_priority_hint = INT_MIN; 5784 tasklet_init(&ve->base.execlists.tasklet, 5785 virtual_submission_tasklet, 5786 (unsigned long)ve); 5787 5788 intel_context_init(&ve->context, &ve->base); 5789 5790 ve->base.breadcrumbs = intel_breadcrumbs_create(NULL); 5791 if (!ve->base.breadcrumbs) { 5792 err = -ENOMEM; 5793 goto err_put; 5794 } 5795 5796 for (n = 0; n < count; n++) { 5797 struct intel_engine_cs *sibling = siblings[n]; 5798 5799 GEM_BUG_ON(!is_power_of_2(sibling->mask)); 5800 if (sibling->mask & ve->base.mask) { 5801 DRM_DEBUG("duplicate %s entry in load balancer\n", 5802 sibling->name); 5803 err = -EINVAL; 5804 goto err_put; 5805 } 5806 5807 /* 5808 * The virtual engine implementation is tightly coupled to 5809 * the execlists backend -- we push out request directly 5810 * into a tree inside each physical engine. We could support 5811 * layering if we handle cloning of the requests and 5812 * submitting a copy into each backend. 5813 */ 5814 if (sibling->execlists.tasklet.func != 5815 execlists_submission_tasklet) { 5816 err = -ENODEV; 5817 goto err_put; 5818 } 5819 5820 GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); 5821 RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); 5822 5823 ve->siblings[ve->num_siblings++] = sibling; 5824 ve->base.mask |= sibling->mask; 5825 5826 /* 5827 * All physical engines must be compatible for their emission 5828 * functions (as we build the instructions during request 5829 * construction and do not alter them before submission 5830 * on the physical engine). We use the engine class as a guide 5831 * here, although that could be refined. 
		if (ve->base.class != OTHER_CLASS) {
			if (ve->base.class != sibling->class) {
				DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
					  sibling->class, ve->base.class);
				err = -EINVAL;
				goto err_put;
			}
			continue;
		}

		ve->base.class = sibling->class;
		ve->base.uabi_class = sibling->uabi_class;
		snprintf(ve->base.name, sizeof(ve->base.name),
			 "v%dx%d", ve->base.class, count);
		ve->base.context_size = sibling->context_size;

		ve->base.emit_bb_start = sibling->emit_bb_start;
		ve->base.emit_flush = sibling->emit_flush;
		ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
		ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
		ve->base.emit_fini_breadcrumb_dw =
			sibling->emit_fini_breadcrumb_dw;

		ve->base.flags = sibling->flags;
	}

	ve->base.flags |= I915_ENGINE_IS_VIRTUAL;

	virtual_engine_initial_hint(ve);
	return &ve->context;

err_put:
	intel_context_put(&ve->context);
	return ERR_PTR(err);
}

struct intel_context *
intel_execlists_clone_virtual(struct intel_engine_cs *src)
{
	struct virtual_engine *se = to_virtual_engine(src);
	struct intel_context *dst;

	dst = intel_execlists_create_virtual(se->siblings,
					     se->num_siblings);
	if (IS_ERR(dst))
		return dst;

	if (se->num_bonds) {
		struct virtual_engine *de = to_virtual_engine(dst->engine);

		de->bonds = kmemdup(se->bonds,
				    sizeof(*se->bonds) * se->num_bonds,
				    GFP_KERNEL);
		if (!de->bonds) {
			intel_context_put(dst);
			return ERR_PTR(-ENOMEM);
		}

		de->num_bonds = se->num_bonds;
	}

	return dst;
}

int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
				     const struct intel_engine_cs *master,
				     const struct intel_engine_cs *sibling)
{
	struct virtual_engine *ve = to_virtual_engine(engine);
	struct ve_bond *bond;
	int n;

	/* Sanity check the sibling is part of the virtual engine */
	for (n = 0; n < ve->num_siblings; n++)
		if (sibling == ve->siblings[n])
			break;
	if (n == ve->num_siblings)
		return -EINVAL;

	bond = virtual_find_bond(ve, master);
	if (bond) {
		bond->sibling_mask |= sibling->mask;
		return 0;
	}

	bond = krealloc(ve->bonds,
			sizeof(*bond) * (ve->num_bonds + 1),
			GFP_KERNEL);
	if (!bond)
		return -ENOMEM;

	bond[ve->num_bonds].master = master;
	bond[ve->num_bonds].sibling_mask = sibling->mask;

	ve->bonds = bond;
	ve->num_bonds++;

	return 0;
}

struct intel_engine_cs *
intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
				 unsigned int sibling)
{
	struct virtual_engine *ve = to_virtual_engine(engine);

	if (sibling >= ve->num_siblings)
		return NULL;

	return ve->siblings[sibling];
}

void intel_execlists_show_requests(struct intel_engine_cs *engine,
				   struct drm_printer *m,
				   void (*show_request)(struct drm_printer *m,
							struct i915_request *rq,
							const char *prefix),
				   unsigned int max)
{
	const struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request *rq, *last;
	unsigned long flags;
	unsigned int count;
	struct rb_node *rb;

	spin_lock_irqsave(&engine->active.lock, flags);

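	/*
	 * Three passes below: requests already executing on HW ("E"),
	 * requests queued for submission ("Q") and requests parked on
	 * virtual engines ("V"); each list is truncated to at most max
	 * entries.
	 */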
	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->active.requests, sched.link) {
		if (count++ < max - 1)
			show_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tE ");
	}

	if (execlists->switch_priority_hint != INT_MIN)
		drm_printf(m, "\t\tSwitch priority hint: %d\n",
			   READ_ONCE(execlists->switch_priority_hint));
	if (execlists->queue_priority_hint != INT_MIN)
		drm_printf(m, "\t\tQueue priority hint: %d\n",
			   READ_ONCE(execlists->queue_priority_hint));

	last = NULL;
	count = 0;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		int i;

		priolist_for_each_request(rq, p, i) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tQ ");
	}

	last = NULL;
	count = 0;
	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
		struct virtual_engine *ve =
			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
		struct i915_request *rq = READ_ONCE(ve->request);

		if (rq) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\tV ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > max) {
			drm_printf(m,
				   "\t\t...skipping %d virtual requests...\n",
				   count - max);
		}
		show_request(m, last, "\t\tV ");
	}

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void intel_lr_context_reset(struct intel_engine_cs *engine,
			    struct intel_context *ce,
			    u32 head,
			    bool scrub)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));

	/*
	 * We want a simple context + ring to execute the breadcrumb update.
	 * We cannot rely on the context being intact across the GPU hang,
	 * so clear it and rebuild just what we need for the breadcrumb.
	 * All pending requests for this context will be zapped, and any
	 * future request will be after userspace has had the opportunity
	 * to recreate its own state.
	 */
	if (scrub)
		restore_default_state(ce, engine);

	/* Rerun the request; its payload has been neutered (if guilty). */
	__execlists_update_reg_state(ce, engine, head);
}

bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
{
	return engine->set_default_submission ==
	       intel_execlists_set_default_submission;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_lrc.c"
#endif