/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_reset.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_deps.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"

struct execute_cb {
	struct irq_work work;
	struct i915_sw_fence *fence;
	struct i915_request *signal;
};

static struct kmem_cache *slab_requests;
static struct kmem_cache *slab_execute_cbs;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return dev_name(to_request(fence)->engine->i915->drm.dev);
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	const struct i915_gem_context *ctx;

	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	ctx = i915_request_gem_context(to_request(fence));
	if (!ctx)
		return "[" DRIVER_NAME "]";

	return ctx->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait_timeout(to_request(fence),
					 interruptible | I915_WAIT_PRIORITY,
					 timeout);
}

struct kmem_cache *i915_request_slab_cache(void)
{
	return slab_requests;
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
		   rq->guc_prio != GUC_PRIO_FINI);

	i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
	if (i915_vma_snapshot_present(&rq->batch_snapshot))
		i915_vma_snapshot_put_onstack(&rq->batch_snapshot);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);

	/*
	 * Keep one request on each engine for reserved use under mempressure,
	 * do not use with virtual engines as this really is only needed for
	 * kernel contexts.
	 */
	if (!intel_engine_is_virtual(rq->engine) &&
	    !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
		intel_context_put(rq->context);
		return;
	}

	intel_context_put(rq->context);

	kmem_cache_free(slab_requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
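
/*
 * execute_cb bookkeeping: when one request awaits the *execution* (rather
 * than the completion) of another, an execute_cb holding a reference on the
 * waiter's submit fence is queued on the signaler's execute_cb list. Once the
 * signaler is actually submitted to HW, the callbacks are run (via irq_work,
 * or immediately) and the waiters' submit fences are released.
 */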
static void irq_execute_cb(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	i915_sw_fence_complete(cb->fence);
	kmem_cache_free(slab_execute_cbs, cb);
}

static __always_inline void
__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
{
	struct execute_cb *cb, *cn;

	if (llist_empty(&rq->execute_cb))
		return;

	llist_for_each_entry_safe(cb, cn,
				  llist_del_all(&rq->execute_cb),
				  work.node.llist)
		fn(&cb->work);
}

static void __notify_execute_cb_irq(struct i915_request *rq)
{
	__notify_execute_cb(rq, irq_work_queue);
}

static bool irq_work_imm(struct irq_work *wrk)
{
	wrk->func(wrk);
	return false;
}

void i915_request_notify_execute_cb_imm(struct i915_request *rq)
{
	__notify_execute_cb(rq, irq_work_imm);
}

static void __i915_request_fill(struct i915_request *rq, u8 val)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, val, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, val, rq->postfix - head);
}

/**
 * i915_request_active_engine
 * @rq: request to inspect
 * @active: pointer in which to return the active engine
 *
 * Fills in @active with the currently active engine if the request
 * is active and not yet completed.
 *
 * Returns true if the request was active, false otherwise.
 */
bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active)
{
	struct intel_engine_cs *engine, *locked;
	bool ret = false;

	/*
	 * Serialise with __i915_request_submit() so that it sees
	 * is-banned?, or we know the request is already inflight.
	 *
	 * Note that rq->engine is unstable, and so we double
	 * check that we have acquired the lock on the final engine.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->sched_engine->lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->sched_engine->lock);
		locked = engine;
		spin_lock(&locked->sched_engine->lock);
	}

	if (i915_request_is_active(rq)) {
		if (!__i915_request_is_complete(rq))
			*active = locked;
		ret = true;
	}

	spin_unlock_irq(&locked->sched_engine->lock);

	return ret;
}
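
/*
 * Per-request watchdog: if the context specifies a watchdog timeout
 * (ce->watchdog.timeout_us), __rq_arm_watchdog() starts an hrtimer when the
 * request is submitted. Should the timer fire before the request completes,
 * the request is pushed onto gt->watchdog.list and the GT watchdog worker is
 * scheduled to deal with (typically cancel) the overdue request.
 * __rq_cancel_watchdog() stops the timer and drops the reference taken for it
 * when the request completes in time.
 */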
static void __rq_init_watchdog(struct i915_request *rq)
{
	rq->watchdog.timer.function = NULL;
}

static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
{
	struct i915_request *rq =
		container_of(hrtimer, struct i915_request, watchdog.timer);
	struct intel_gt *gt = rq->engine->gt;

	if (!i915_request_completed(rq)) {
		if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
			schedule_work(&gt->watchdog.work);
	} else {
		i915_request_put(rq);
	}

	return HRTIMER_NORESTART;
}

static void __rq_arm_watchdog(struct i915_request *rq)
{
	struct i915_request_watchdog *wdg = &rq->watchdog;
	struct intel_context *ce = rq->context;

	if (!ce->watchdog.timeout_us)
		return;

	i915_request_get(rq);

	hrtimer_init(&wdg->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wdg->timer.function = __rq_watchdog_expired;
	hrtimer_start_range_ns(&wdg->timer,
			       ns_to_ktime(ce->watchdog.timeout_us *
					   NSEC_PER_USEC),
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}

static void __rq_cancel_watchdog(struct i915_request *rq)
{
	struct i915_request_watchdog *wdg = &rq->watchdog;

	if (wdg->timer.function && hrtimer_try_to_cancel(&wdg->timer) > 0)
		i915_request_put(rq);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

/**
 * i915_request_free_capture_list - Free a capture list
 * @capture: Pointer to the first list item or NULL
 *
 */
void i915_request_free_capture_list(struct i915_capture_list *capture)
{
	while (capture) {
		struct i915_capture_list *next = capture->next;

		i915_vma_snapshot_put(capture->vma_snapshot);
		kfree(capture);
		capture = next;
	}
}

#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list)

#define clear_capture_list(_rq) ((_rq)->capture_list = NULL)

#else

#define i915_request_free_capture_list(_a) do {} while (0)

#define assert_capture_list_is_null(_a) do {} while (0)

#define clear_capture_list(_rq) do {} while (0)

#endif

bool i915_request_retire(struct i915_request *rq)
{
	if (!__i915_request_is_complete(rq))
		return false;

	RQ_TRACE(rq, "\n");

	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
	trace_i915_request_retire(rq);
	i915_request_mark_complete(rq);

	__rq_cancel_watchdog(rq);

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&rq->link,
				  &i915_request_timeline(rq)->requests));
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		/* Poison before we release our space in the ring */
		__i915_request_fill(rq, POISON_FREE);
	rq->ring->head = rq->postfix;

	if (!i915_request_signaled(rq)) {
		spin_lock_irq(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		spin_unlock_irq(&rq->lock);
	}

	if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
		intel_rps_dec_waiters(&rq->engine->gt->rps);

	/*
	 * We only loosely track inflight requests across preemption,
	 * and so we may find ourselves attempting to retire a _completed_
	 * request that we have removed from the HW and put back on a run
	 * queue.
	 *
	 * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
	 * after removing the breadcrumb and signaling it, so that we do not
	 * inadvertently attach the breadcrumb to a completed request.
	 */
	rq->engine->remove_active_request(rq);
	GEM_BUG_ON(!llist_empty(&rq->execute_cb));

	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */

	intel_context_exit(rq->context);
	intel_context_unpin(rq->context);

	i915_sched_node_fini(&rq->sched);
	i915_request_put(rq);

	return true;
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *tmp;

	RQ_TRACE(rq, "\n");
	GEM_BUG_ON(!__i915_request_is_complete(rq));

	do {
		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
		GEM_BUG_ON(!i915_request_completed(tmp));
	} while (i915_request_retire(tmp) && tmp != rq);
}

static struct i915_request * const *
__engine_active(struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->execlists.active);
}

static bool __request_in_flight(const struct i915_request *signal)
{
	struct i915_request * const *port, *rq;
	bool inflight = false;

	if (!i915_request_is_ready(signal))
		return false;

	/*
	 * Even if we have unwound the request, it may still be on
	 * the GPU (preempt-to-busy). If that request is inside an
	 * unpreemptible critical section, it will not be removed. Some
	 * GPU functions may even be stuck waiting for the paired request
	 * (__await_execution) to be submitted and cannot be preempted
	 * until the bond is executing.
	 *
	 * As we know that there are always preemption points between
	 * requests, we know that only the currently executing request
	 * may be still active even though we have cleared the flag.
	 * However, we can't rely on our tracking of ELSP[0] to know
	 * which request is currently active and so may be stuck, as
	 * the tracking may be an event behind. Instead assume that
	 * if the context is still inflight, then it is still active
	 * even if the active flag has been cleared.
	 *
	 * To further complicate matters, if there is a pending promotion, the HW
	 * may either perform a context switch to the second inflight execlists,
	 * or it may switch to the pending set of execlists. In the case of the
	 * latter, it may send the ACK and we process the event copying the
	 * pending[] over top of inflight[], _overwriting_ our *active. Since
	 * this implies the HW is arbitrating and not stuck in *active, we do
	 * not worry about complete accuracy, but we do require no read/write
	 * tearing of the pointer [the read of the pointer must be valid, even
	 * as the array is being overwritten, for which we require the writes
	 * to avoid tearing.]
	 *
	 * Note that the read of *execlists->active may race with the promotion
	 * of execlists->pending[] to execlists->inflight[], overwriting
	 * the value at *execlists->active. This is fine. The promotion implies
	 * that we received an ACK from the HW, and so the context is not
	 * stuck -- if we do not see ourselves in *active, the inflight status
	 * is valid. If instead we see ourselves being copied into *active,
	 * we are inflight and may signal the callback.
	 */
	if (!intel_context_inflight(signal->context))
		return false;

	rcu_read_lock();
	for (port = __engine_active(signal->engine);
	     (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
	     port++) {
		if (rq->context == signal->context) {
			inflight = i915_seqno_passed(rq->fence.seqno,
						     signal->fence.seqno);
			break;
		}
	}
	rcu_read_unlock();

	return inflight;
}
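
/*
 * __await_execution() queues an execute_cb on @signal so that @rq's submit
 * fence is not released until @signal has actually been submitted to HW.
 * If @signal is already active (or still in flight due to preempt-to-busy),
 * the callback is run immediately instead.
 */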
static int
__await_execution(struct i915_request *rq,
		  struct i915_request *signal,
		  gfp_t gfp)
{
	struct execute_cb *cb;

	if (i915_request_is_active(signal))
		return 0;

	cb = kmem_cache_alloc(slab_execute_cbs, gfp);
	if (!cb)
		return -ENOMEM;

	cb->fence = &rq->submit;
	i915_sw_fence_await(cb->fence);
	init_irq_work(&cb->work, irq_execute_cb);

	/*
	 * Register the callback first, then see if the signaler is already
	 * active. This ensures that if we race with the
	 * __notify_execute_cb from i915_request_submit() and we are not
	 * included in that list, we get a second bite of the cherry and
	 * execute it ourselves. After this point, a future
	 * i915_request_submit() will notify us.
	 *
	 * In i915_request_retire() we set the ACTIVE bit on a completed
	 * request (then flush the execute_cb). So by registering the
	 * callback first, then checking the ACTIVE bit, we serialise with
	 * the completed/retired request.
	 */
	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
		if (i915_request_is_active(signal) ||
		    __request_in_flight(signal))
			i915_request_notify_execute_cb_imm(signal);
	}

	return 0;
}

static bool fatal_error(int error)
{
	switch (error) {
	case 0: /* not an error! */
	case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
	case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
		return false;
	default:
		return true;
	}
}

void __i915_request_skip(struct i915_request *rq)
{
	GEM_BUG_ON(!fatal_error(rq->fence.error));

	if (rq->infix == rq->postfix)
		return;

	RQ_TRACE(rq, "error: %d\n", rq->fence.error);

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	__i915_request_fill(rq, 0);
	rq->infix = rq->postfix;
}
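
/*
 * i915_request_set_error_once() records @error on the request's fence unless
 * the request has already signaled or a fatal error has already been set.
 * Returns true if the error was stored.
 */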
bool i915_request_set_error_once(struct i915_request *rq, int error)
{
	int old;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));

	if (i915_request_signaled(rq))
		return false;

	old = READ_ONCE(rq->fence.error);
	do {
		if (fatal_error(old))
			return false;
	} while (!try_cmpxchg(&rq->fence.error, &old, error));

	return true;
}

struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{
	if (__i915_request_is_complete(rq))
		return NULL;

	GEM_BUG_ON(i915_request_signaled(rq));

	/* As soon as the request is completed, it may be retired */
	rq = i915_request_get(rq);

	i915_request_set_error_once(rq, -EIO);
	i915_request_mark_complete(rq);

	return rq;
}

bool __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	bool result = false;

	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->sched_engine->lock);

	/*
	 * With the advent of preempt-to-busy, we frequently encounter
	 * requests that we have unsubmitted from HW, but left running
	 * until the next ack and so have completed in the meantime. On
	 * resubmission of that completed request, we can skip
	 * updating the payload, and execlists can even skip submitting
	 * the request.
	 *
	 * We must remove the request from the caller's priority queue,
	 * and the caller must only call us when the request is in their
	 * priority queue, under the sched_engine->lock. This ensures that the
	 * request has *not* yet been retired and we can safely move
	 * the request into the engine->active.list where it will be
	 * dropped upon retiring. (Otherwise if we resubmit a *retired*
	 * request, this would be a horrible use-after-free.)
	 */
	if (__i915_request_is_complete(request)) {
		list_del_init(&request->sched.link);
		goto active;
	}

	if (unlikely(intel_context_is_banned(request->context)))
		i915_request_set_error_once(request, -EIO);

	if (unlikely(fatal_error(request->fence.error)))
		__i915_request_skip(request);

	/*
	 * Are we using semaphores when the gpu is already saturated?
	 *
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * If we installed a semaphore on this request and we only submit
	 * the request after the signaler completed, that indicates the
	 * system is overloaded and using semaphores at this time only
	 * increases the amount of work we are doing. If so, we disable
	 * further use of semaphores until we are idle again, whence we
	 * optimistically try again.
	 */
	if (request->sched.semaphores &&
	    i915_sw_fence_signaled(&request->semaphore))
		engine->saturated |= request->sched.semaphores;

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	trace_i915_request_execute(request);
	if (engine->bump_serial)
		engine->bump_serial(engine);
	else
		engine->serial++;

	result = true;

	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	engine->add_active_request(request);
active:
	clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

	/*
	 * XXX Rollback bonded-execution on __i915_request_unsubmit()?
	 *
	 * In the future, perhaps when we have an active time-slicing scheduler,
	 * it will be interesting to unsubmit parallel execution and remove
	 * busywaits from the GPU until their master is restarted. This is
	 * quite hairy, we have to carefully rollback the fence and do a
	 * preempt-to-idle cycle on the target engine, all the while the
	 * master execute_cb may refire.
	 */
	__notify_execute_cb_irq(request);

	/* We may be recursing from the signal callback of another i915 fence */
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_enable_breadcrumb(request);

	return result;
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	RQ_TRACE(request, "\n");

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->sched_engine->lock);

	/*
	 * Before we remove this breadcrumb from the signal list, we have
	 * to ensure that a concurrent dma_fence_enable_signaling() does not
	 * attach itself. We first mark the request as no longer active and
	 * make sure that is visible to other cores, and then remove the
	 * breadcrumb if attached.
	 */
	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);

	/* We've already spun, don't charge on resubmitting. */
	if (request->sched.semaphores && __i915_request_has_started(request))
		request->sched.semaphores = 0;

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
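
/*
 * i915_request_cancel() marks @rq with @error and asks its context to cancel
 * the request. It is a no-op if the request has already signaled or a fatal
 * error was previously recorded on it.
 */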
void i915_request_cancel(struct i915_request *rq, int error)
{
	if (!i915_request_set_error_once(rq, error))
		return;

	set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);

	intel_context_cancel_request(rq->context, rq);
}

static int
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);

		if (unlikely(fence->error))
			i915_request_set_error_once(request, fence->error);
		else
			__rq_arm_watchdog(request);

		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		break;

	case FENCE_FREE:
		i915_request_put(rq);
		break;
	}

	return NOTIFY_DONE;
}

static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl,
		   struct i915_request **rsvd,
		   gfp_t gfp)
{
	struct i915_request *rq;

	/* If we cannot wait, dip into our reserves */
	if (!gfpflags_allow_blocking(gfp)) {
		rq = xchg(rsvd, NULL);
		if (!rq) /* Use the normal failure path for one final WARN */
			goto out;

		return rq;
	}

	if (list_empty(&tl->requests))
		goto out;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	i915_request_retire(rq);

	rq = kmem_cache_alloc(slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (rq)
		return rq;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&tl->requests, typeof(*rq), link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	retire_requests(tl);

out:
	return kmem_cache_alloc(slab_requests, gfp);
}
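
/*
 * One-time initialisation of request objects: the embedded lock, scheduling
 * node, sw fences and execute_cb list are set up here and only *reinitialised*
 * (see the _reinit() calls in __i915_request_create()) on reuse, as requests
 * sit on an RCU freelist (see i915_fence_release()) and may be reallocated
 * while a concurrent lookup is still inspecting them.
 */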
static void __i915_request_ctor(void *arg)
{
	struct i915_request *rq = arg;

	spin_lock_init(&rq->lock);
	i915_sched_node_init(&rq->sched);
	i915_sw_fence_init(&rq->submit, submit_notify);
	i915_sw_fence_init(&rq->semaphore, semaphore_notify);

	clear_capture_list(rq);
	rq->batch_snapshot.present = false;

	init_llist_head(&rq->execute_cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#define clear_batch_ptr(_rq) ((_rq)->batch = NULL)
#else
#define clear_batch_ptr(_a) do {} while (0)
#endif

struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;
	u32 seqno;
	int ret;

	might_alloc(gfp);

	/* Check that the caller provided an already pinned context */
	__intel_context_pin(ce);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_active_request_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	/*
	 * Hold a reference to the intel_context over the life of an i915_request.
	 * Without this an i915_request can exist after the context has been
	 * destroyed (e.g. request retired, context closed, but user space holds
	 * a reference to the request from an out fence). In the case of GuC
	 * submission + virtual engine, the engine that the request references
	 * is also destroyed which can trigger bad pointer deref in fence ops
	 * (e.g. i915_fence_get_driver_name). We could likely change these
	 * functions to avoid touching the engine but let's just be safe and
	 * hold the intel_context reference. In execlist mode the request always
	 * eventually points to a physical engine so this isn't an issue.
	 */
	rq->context = intel_context_get(ce);
	rq->engine = ce->engine;
	rq->ring = ce->ring;
	rq->execution_mask = ce->engine->mask;

	ret = intel_timeline_get_seqno(tl, rq, &seqno);
	if (ret)
		goto err_free;

	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
		       tl->fence_context, seqno);

	RCU_INIT_POINTER(rq->timeline, tl);
	rq->hwsp_seqno = tl->hwsp_seqno;
	GEM_BUG_ON(__i915_request_is_complete(rq));

	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

	rq->guc_prio = GUC_PRIO_INIT;

	/* We bump the ref for the fence chain */
	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);

	i915_sched_node_reinit(&rq->sched);

	/* No zalloc, everything must be cleared after use */
	clear_batch_ptr(rq);
	__rq_init_watchdog(rq);
	assert_capture_list_is_null(rq);
	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
	GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot));

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space =
		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = rq->engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	intel_context_mark_active(ce);
	list_add_tail_rcu(&rq->link, &tl->requests);

	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
	intel_context_put(ce);
	kmem_cache_free(slab_requests, rq);
err_unreserve:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

struct i915_request *
i915_request_create(struct intel_context *ce)
{
	struct i915_request *rq;
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return ERR_CAST(tl);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	if (!list_is_last(&rq->link, &tl->requests))
		i915_request_retire(rq);

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_KERNEL);
	intel_context_exit(ce); /* active reference transferred to request */
	if (IS_ERR(rq))
		goto err_unlock;

	/* Check that we do not interrupt ourselves with a new request */
	rq->cookie = lockdep_pin_lock(&tl->mutex);

	return rq;

err_unlock:
	intel_context_timeline_unlock(tl);
	return rq;
}
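
/*
 * i915_request_await_start() arranges for @rq to wait until @signal has
 * *started* executing: it looks up the request preceding @signal on
 * @signal's timeline (under RCU, guarding against it being recycled) and
 * awaits that request's completion fence.
 */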
static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
	struct dma_fence *fence;
	int err;

	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
		return 0;

	if (i915_request_started(signal))
		return 0;

	/*
	 * The caller holds a reference on @signal, but we do not serialise
	 * against it being retired and removed from the lists.
	 *
	 * We do not hold a reference to the request before @signal, and
	 * so must be very careful to ensure that it is not _recycled_ as
	 * we follow the link backwards.
	 */
	fence = NULL;
	rcu_read_lock();
	do {
		struct list_head *pos = READ_ONCE(signal->link.prev);
		struct i915_request *prev;

		/* Confirm signal has not been retired, the link is valid */
		if (unlikely(__i915_request_has_started(signal)))
			break;

		/* Is signal the earliest request on its timeline? */
		if (pos == &rcu_dereference(signal->timeline)->requests)
			break;

		/*
		 * Peek at the request before us in the timeline. That
		 * request will only be valid before it is retired, so
		 * after acquiring a reference to it, confirm that it is
		 * still part of the signaler's timeline.
		 */
		prev = list_entry(pos, typeof(*prev), link);
		if (!i915_request_get_rcu(prev))
			break;

		/* After the strong barrier, confirm prev is still attached */
		if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
			i915_request_put(prev);
			break;
		}

		fence = &prev->fence;
	} while (0);
	rcu_read_unlock();
	if (!fence)
		return 0;

	err = 0;
	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
		err = i915_sw_fence_await_dma_fence(&rq->submit,
						    fence, 0,
						    I915_FENCE_GFP);
	dma_fence_put(fence);

	return err;
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
	/*
	 * Polling a semaphore causes bus traffic, delaying other users of
	 * both the GPU and CPU. We want to limit the impact on others,
	 * while taking advantage of early submission to reduce GPU
	 * latency. Therefore we restrict ourselves to not using more
	 * than one semaphore from each source, and not using a semaphore
	 * if we have detected the engine is saturated (i.e. would not be
	 * submitted early and cause bus traffic reading an already passed
	 * semaphore).
	 *
	 * See the are-we-too-late? check in __i915_request_submit().
	 */
	return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
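
/*
 * __emit_semaphore_wait() writes an MI_SEMAPHORE_WAIT (polling,
 * greater-than-or-equal comparison) into @to's ring, busywaiting on the
 * signaler's HWSP until it reports @seqno. Gen12+ uses a semaphore token,
 * hence the two extra dwords.
 */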
static int
__emit_semaphore_wait(struct i915_request *to,
		      struct i915_request *from,
		      u32 seqno)
{
	const int has_token = GRAPHICS_VER(to->engine->i915) >= 12;
	u32 hwsp_offset;
	int len, err;
	u32 *cs;

	GEM_BUG_ON(GRAPHICS_VER(to->engine->i915) < 8);
	GEM_BUG_ON(i915_request_has_initial_breadcrumb(to));

	/* We need to pin the signaler's HWSP until we are finished reading. */
	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
	if (err)
		return err;

	len = 4;
	if (has_token)
		len += 2;

	cs = intel_ring_begin(to, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Using greater-than-or-equal here means we have to worry
	 * about seqno wraparound. To sidestep that issue, we swap
	 * the timeline HWSP upon wrapping, so that everyone listening
	 * for the old (pre-wrap) values does not see the much smaller
	 * (post-wrap) values than they were expecting (and so wait
	 * forever).
	 */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_GTE_SDD) +
		has_token;
	*cs++ = seqno;
	*cs++ = hwsp_offset;
	*cs++ = 0;
	if (has_token) {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(to, cs);
	return 0;
}

static bool
can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
{
	return to->engine->gt->ggtt == from->engine->gt->ggtt;
}

static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{
	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
	struct i915_sw_fence *wait = &to->submit;

	if (!can_use_semaphore_wait(to, from))
		goto await_fence;

	if (!intel_context_use_semaphores(to->context))
		goto await_fence;

	if (i915_request_has_initial_breadcrumb(to))
		goto await_fence;

	/*
	 * If this or its dependents are waiting on an external fence
	 * that may fail catastrophically, then we want to avoid using
	 * semaphores as they bypass the fence signaling metadata, and we
	 * lose the fence->error propagation.
	 */
	if (from->sched.flags & I915_SCHED_HAS_EXTERNAL_CHAIN)
		goto await_fence;

	/* Just emit the first semaphore we see as request space is limited. */
	if (already_busywaiting(to) & mask)
		goto await_fence;

	if (i915_request_await_start(to, from) < 0)
		goto await_fence;

	/* Only submit our spinner after the signaler is running! */
	if (__await_execution(to, from, gfp))
		goto await_fence;

	if (__emit_semaphore_wait(to, from, from->fence.seqno))
		goto await_fence;

	to->sched.semaphores |= mask;
	wait = &to->semaphore;

await_fence:
	return i915_sw_fence_await_dma_fence(wait,
					     &from->fence, 0,
					     I915_FENCE_GFP);
}
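
/*
 * The "start" of a fence is tracked in the timeline's sync map as seqno - 1:
 * intel_timeline_sync_has_start()/_set_start() let repeated execution awaits
 * on the same timeline be squashed in __i915_request_await_execution().
 */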
static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
					  struct dma_fence *fence)
{
	return __intel_timeline_sync_is_later(tl,
					      fence->context,
					      fence->seqno - 1);
}

static int intel_timeline_sync_set_start(struct intel_timeline *tl,
					 const struct dma_fence *fence)
{
	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
}

static int
__i915_request_await_execution(struct i915_request *to,
			       struct i915_request *from)
{
	int err;

	GEM_BUG_ON(intel_context_is_barrier(from->context));

	/* Submit both requests at the same time */
	err = __await_execution(to, from, I915_FENCE_GFP);
	if (err)
		return err;

	/* Squash repeated dependencies to the same timelines */
	if (intel_timeline_sync_has_start(i915_request_timeline(to),
					  &from->fence))
		return 0;

	/*
	 * Wait until the start of this request.
	 *
	 * The execution cb fires when we submit the request to HW. But in
	 * many cases this may be long before the request itself is ready to
	 * run (consider that we submit 2 requests for the same context, where
	 * the request of interest is behind an indefinite spinner). So we hook
	 * up to both to reduce our queues and keep the execution lag minimised
	 * in the worst case, though we hope that the await_start is elided.
	 */
	err = i915_request_await_start(to, from);
	if (err < 0)
		return err;

	/*
	 * Ensure both start together [after all semaphores in signal]
	 *
	 * Now that we are queued to the HW at roughly the same time (thanks
	 * to the execute cb) and are ready to run at roughly the same time
	 * (thanks to the await start), our signaler may still be indefinitely
	 * delayed by waiting on a semaphore from a remote engine. If our
	 * signaler depends on a semaphore, so indirectly do we, and we do not
	 * want to start our payload until our signaler also starts theirs.
	 * So we wait.
	 *
	 * However, there is also a second condition for which we need to wait
	 * for the precise start of the signaler. Consider that the signaler
	 * was submitted in a chain of requests following another context
	 * (with just an ordinary intra-engine fence dependency between the
	 * two). In this case the signaler is queued to HW, but not for
	 * immediate execution, and so we must wait until it reaches the
	 * active slot.
	 */
	if (can_use_semaphore_wait(to, from) &&
	    intel_engine_has_semaphores(to->engine) &&
	    !i915_request_has_initial_breadcrumb(to)) {
		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
		if (err < 0)
			return err;
	}

	/* Couple the dependency tree for PI on this exposed to->fence */
	if (to->engine->sched_engine->schedule) {
		err = i915_sched_node_add_dependency(&to->sched,
						     &from->sched,
						     I915_DEPENDENCY_WEAK);
		if (err < 0)
			return err;
	}

	return intel_timeline_sync_set_start(i915_request_timeline(to),
					     &from->fence);
}

static void mark_external(struct i915_request *rq)
{
	/*
	 * The downside of using semaphores is that we lose metadata passing
	 * along the signaling chain. This is particularly nasty when we
	 * need to pass along a fatal error such as EFAULT or EDEADLK. For
	 * fatal errors we want to scrub the request before it is executed,
	 * which means that we cannot preload the request onto HW and have
	 * it wait upon a semaphore.
	 */
	rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
}

static int
__i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
	mark_external(rq);
	return i915_sw_fence_await_dma_fence(&rq->submit, fence,
					     i915_fence_context_timeout(rq->engine->i915,
									fence->context),
					     I915_FENCE_GFP);
}

static int
i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence *iter;
	int err = 0;

	if (!to_dma_fence_chain(fence))
		return __i915_request_await_external(rq, fence);

	dma_fence_chain_for_each(iter, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(iter);

		if (!dma_fence_is_i915(chain->fence)) {
			err = __i915_request_await_external(rq, iter);
			break;
		}

		err = i915_request_await_dma_fence(rq, chain->fence);
		if (err < 0)
			break;
	}

	dma_fence_put(iter);
	return err;
}

static inline bool is_parallel_rq(struct i915_request *rq)
{
	return intel_context_is_parallel(rq->context);
}

static inline struct intel_context *request_to_parent(struct i915_request *rq)
{
	return intel_context_to_parent(rq->context);
}

static bool is_same_parallel_context(struct i915_request *to,
				     struct i915_request *from)
{
	if (is_parallel_rq(to))
		return request_to_parent(to) == request_to_parent(from);

	return false;
}

int
i915_request_await_execution(struct i915_request *rq,
			     struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		/* XXX Error for signal-on-any fence arrays */

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		if (fence->context == rq->fence.context)
			continue;

		/*
		 * We don't squash repeated fence dependencies here as we
		 * want to run our callback in all cases.
		 */

		if (dma_fence_is_i915(fence)) {
			if (is_same_parallel_context(rq, to_request(fence)))
				continue;
			ret = __i915_request_await_execution(rq,
							     to_request(fence));
		} else {
			ret = i915_request_await_external(rq, fence);
		}
		if (ret < 0)
			return ret;
	} while (--nchild);

	return 0;
}

static int
await_request_submit(struct i915_request *to, struct i915_request *from)
{
	/*
	 * If we are waiting on a virtual engine, then it may be
	 * constrained to execute on a single engine *prior* to submission.
	 * When it is submitted, it will first be submitted to the virtual
	 * engine and then passed to the physical engine. We cannot allow
	 * the waiter to be submitted immediately to the physical engine
	 * as it may then bypass the virtual request.
	 */
	if (to->engine == READ_ONCE(from->engine))
		return i915_sw_fence_await_sw_fence_gfp(&to->submit,
							&from->submit,
							I915_FENCE_GFP);
	else
		return __i915_request_await_execution(to, from);
}

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from)) {
		i915_sw_fence_set_error_once(&to->submit, from->fence.error);
		return 0;
	}

	if (to->engine->sched_engine->schedule) {
		ret = i915_sched_node_add_dependency(&to->sched,
						     &from->sched,
						     I915_DEPENDENCY_EXTERNAL);
		if (ret < 0)
			return ret;
	}

	if (!intel_engine_uses_guc(to->engine) &&
	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
		ret = await_request_submit(to, from);
	else
		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
	if (ret < 0)
		return ret;

	return 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context &&
		    intel_timeline_sync_is_later(i915_request_timeline(rq),
						 fence))
			continue;

		if (dma_fence_is_i915(fence)) {
			if (is_same_parallel_context(rq, to_request(fence)))
				continue;
			ret = i915_request_await_request(rq, to_request(fence));
		} else {
			ret = i915_request_await_external(rq, fence);
		}
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context)
			intel_timeline_sync_set(i915_request_timeline(rq),
						fence);
	} while (--nchild);

	return 0;
}

/**
 * i915_request_await_deps - set this request to (async) wait upon a struct
 * i915_deps dma_fence collection
 * @rq: request we are wishing to use
 * @deps: The struct i915_deps containing the dependencies.
 *
 * Returns 0 if successful, negative error code on error.
 */
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
{
	int i, err;

	for (i = 0; i < deps->num_deps; ++i) {
		err = i915_request_await_dma_fence(rq, deps->fences[i]);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret = 0;

	dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
		ret = i915_request_await_dma_fence(to, fence);
		if (ret)
			break;
	}

	return ret;
}
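
/*
 * For parallel (multi-batch) contexts, requests are ordered against the
 * parent context's previously submitted request rather than against the
 * timeline: __i915_request_ensure_parallel_ordering() chains the new request
 * after parallel.last_rq with a submit fence and records it as the new
 * last_rq, while still publishing it as the timeline's last_request.
 */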
static struct i915_request *
__i915_request_ensure_parallel_ordering(struct i915_request *rq,
					struct intel_timeline *timeline)
{
	struct i915_request *prev;

	GEM_BUG_ON(!is_parallel_rq(rq));

	prev = request_to_parent(rq)->parallel.last_rq;
	if (prev) {
		if (!__i915_request_is_complete(prev)) {
			i915_sw_fence_await_sw_fence(&rq->submit,
						     &prev->submit,
						     &rq->submitq);

			if (rq->engine->sched_engine->schedule)
				__i915_sched_node_add_dependency(&rq->sched,
								 &prev->sched,
								 &rq->dep,
								 0);
		}
		i915_request_put(prev);
	}

	request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);

	return to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));
}

static struct i915_request *
__i915_request_ensure_ordering(struct i915_request *rq,
			       struct intel_timeline *timeline)
{
	struct i915_request *prev;

	GEM_BUG_ON(is_parallel_rq(rq));

	prev = to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));

	if (prev && !__i915_request_is_complete(prev)) {
		bool uses_guc = intel_engine_uses_guc(rq->engine);
		bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask |
					  rq->engine->mask);
		bool same_context = prev->context == rq->context;

		/*
		 * The requests are supposed to be kept in order. However,
		 * we need to be wary in case the timeline->last_request
		 * is used as a barrier for external modification to this
		 * context.
		 */
		GEM_BUG_ON(same_context &&
			   i915_seqno_passed(prev->fence.seqno,
					     rq->fence.seqno));

		if ((same_context && uses_guc) || (!uses_guc && pow2))
			i915_sw_fence_await_sw_fence(&rq->submit,
						     &prev->submit,
						     &rq->submitq);
		else
			__i915_sw_fence_await_dma_fence(&rq->submit,
							&prev->fence,
							&rq->dmaq);
		if (rq->engine->sched_engine->schedule)
			__i915_sched_node_add_dependency(&rq->sched,
							 &prev->sched,
							 &rq->dep,
							 0);
	}

	return prev;
}

static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
	struct intel_timeline *timeline = i915_request_timeline(rq);
	struct i915_request *prev;

	/*
	 * Dependency tracking and request ordering along the timeline
	 * is special cased so that we can eliminate redundant ordering
	 * operations while building the request (we know that the timeline
	 * itself is ordered, and here we guarantee it).
	 *
	 * As we know we will need to emit tracking along the timeline,
	 * we embed the hooks into our request struct -- at the cost of
	 * having to have specialised no-allocation interfaces (which will
	 * be beneficial elsewhere).
	 *
	 * A second benefit to open-coding i915_request_await_request is
	 * that we can apply a slight variant of the rules specialised
	 * for timelines that jump between engines (such as virtual engines).
	 * If we consider the case of a virtual engine, we must emit a dma-fence
	 * to prevent scheduling of the second request until the first is
	 * complete (to maximise our greedy late load balancing) and this
	 * precludes optimising to use semaphore serialisation of a single
	 * timeline across engines.
	 *
	 * We do not order parallel submission requests on the timeline as each
	 * parallel submission context has its own timeline and the ordering
	 * rules for parallel requests are that they must be submitted in the
	 * order received from the execbuf IOCTL. So rather than using the
	 * timeline we store a pointer to the last request submitted in the
	 * relationship in the gem context and insert a submission fence
	 * between that request and the request passed into this function, or
	 * alternatively we use a completion fence if the gem context has a
	 * single timeline and this is the first submission of an execbuf IOCTL.
	 */
	if (likely(!is_parallel_rq(rq)))
		prev = __i915_request_ensure_ordering(rq, timeline);
	else
		prev = __i915_request_ensure_parallel_ordering(rq, timeline);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);

	return prev;
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_ring *ring = rq->ring;
	u32 *cs;

	RQ_TRACE(rq, "\n");

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	GEM_BUG_ON(rq->reserved_space > ring->space);
	rq->reserved_space = 0;
	rq->emitted_jiffies = jiffies;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	rq->postfix = intel_ring_offset(rq, cs);

	return __i915_request_add_to_timeline(rq);
}

void __i915_request_queue_bh(struct i915_request *rq)
{
	i915_sw_fence_commit(&rq->semaphore);
	i915_sw_fence_commit(&rq->submit);
}

void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{
	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	if (attr && rq->engine->sched_engine->schedule)
		rq->engine->sched_engine->schedule(rq, attr);

	local_bh_disable();
	__i915_request_queue_bh(rq);
	local_bh_enable(); /* kick tasklets */
}
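
/*
 * i915_request_add() is the tail end of request construction: it commits the
 * request to its timeline, queues it for execution using the GEM context's
 * scheduling attributes (if any), and finally drops the timeline mutex taken
 * in i915_request_create().
 */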
1786 */ 1787 if (attr && rq->engine->sched_engine->schedule) 1788 rq->engine->sched_engine->schedule(rq, attr); 1789 1790 local_bh_disable(); 1791 __i915_request_queue_bh(rq); 1792 local_bh_enable(); /* kick tasklets */ 1793 } 1794 1795 void i915_request_add(struct i915_request *rq) 1796 { 1797 struct intel_timeline * const tl = i915_request_timeline(rq); 1798 struct i915_sched_attr attr = {}; 1799 struct i915_gem_context *ctx; 1800 1801 lockdep_assert_held(&tl->mutex); 1802 lockdep_unpin_lock(&tl->mutex, rq->cookie); 1803 1804 trace_i915_request_add(rq); 1805 __i915_request_commit(rq); 1806 1807 /* XXX placeholder for selftests */ 1808 rcu_read_lock(); 1809 ctx = rcu_dereference(rq->context->gem_context); 1810 if (ctx) 1811 attr = ctx->sched; 1812 rcu_read_unlock(); 1813 1814 __i915_request_queue(rq, &attr); 1815 1816 mutex_unlock(&tl->mutex); 1817 } 1818 1819 static unsigned long local_clock_ns(unsigned int *cpu) 1820 { 1821 unsigned long t; 1822 1823 /* 1824 * Cheaply sample the approximate local CPU clock, in nanoseconds. 1825 * The result and subsequent calculations are defined in the same 1826 * approximate units. The principal source of timing error here is 1827 * that local_clock() is itself only an approximate clock. 1828 * 1829 * Note that local_clock() is only defined with respect to the current 1830 * CPU; the comparisons are no longer valid if we switch CPUs. Instead of 1831 * blocking preemption for the entire busywait, we can detect the CPU 1832 * switch and use that as an indicator of system load and a reason to 1833 * stop busywaiting, see busywait_stop(). 1834 */ 1835 *cpu = get_cpu(); 1836 t = local_clock(); 1837 put_cpu(); 1838 1839 return t; 1840 } 1841 1842 static bool busywait_stop(unsigned long timeout, unsigned int cpu) 1843 { 1844 unsigned int this_cpu; 1845 1846 if (time_after(local_clock_ns(&this_cpu), timeout)) 1847 return true; 1848 1849 return this_cpu != cpu; 1850 } 1851 1852 static bool __i915_spin_request(struct i915_request * const rq, int state) 1853 { 1854 unsigned long timeout_ns; 1855 unsigned int cpu; 1856 1857 /* 1858 * Only wait for the request if we know it is likely to complete. 1859 * 1860 * We don't track the timestamps around requests, nor the average 1861 * request length, so we do not have a good indicator that this 1862 * request will complete within the timeout. What we do know is the 1863 * order in which requests are executed by the context and so we can 1864 * tell if the request has been started. If the request is not even 1865 * running yet, it is a fair assumption that it will not complete 1866 * within our relatively short timeout. 1867 */ 1868 if (!i915_request_is_running(rq)) 1869 return false; 1870 1871 /* 1872 * When waiting for high frequency requests, e.g. during synchronous 1873 * rendering split between the CPU and GPU, the finite amount of time 1874 * required to set up the irq and wait upon it limits the response 1875 * rate. By busywaiting on the request completion for a short while we 1876 * can service the high frequency waits as quickly as possible. However, 1877 * if it is a slow request, we want to sleep as quickly as possible. 1878 * The tradeoff between waiting and sleeping is roughly the time it 1879 * takes to sleep on a request, on the order of a microsecond.
1880 */ 1881 1882 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns); 1883 timeout_ns += local_clock_ns(&cpu); 1884 do { 1885 if (dma_fence_is_signaled(&rq->fence)) 1886 return true; 1887 1888 if (signal_pending_state(state, current)) 1889 break; 1890 1891 if (busywait_stop(timeout_ns, cpu)) 1892 break; 1893 1894 cpu_relax(); 1895 } while (!need_resched()); 1896 1897 return false; 1898 } 1899 1900 struct request_wait { 1901 struct dma_fence_cb cb; 1902 struct task_struct *tsk; 1903 }; 1904 1905 static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb) 1906 { 1907 struct request_wait *wait = container_of(cb, typeof(*wait), cb); 1908 1909 wake_up_process(fetch_and_zero(&wait->tsk)); 1910 } 1911 1912 /** 1913 * i915_request_wait_timeout - wait until execution of request has finished 1914 * @rq: the request to wait upon 1915 * @flags: how to wait 1916 * @timeout: how long to wait in jiffies 1917 * 1918 * i915_request_wait_timeout() waits for the request to be completed, for a 1919 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an 1920 * unbounded wait). 1921 * 1922 * Returns the remaining time (in jiffies) if the request completed, or 1923 * zero if the request is unfinished after the timeout expires. 1924 * If the timeout is 0, it will return 1 if the fence is signaled. 1925 * 1926 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is 1927 * pending before the request completes. 1928 * 1929 * NOTE: This function has the same wait semantics as dma-fence. 1930 */ 1931 long i915_request_wait_timeout(struct i915_request *rq, 1932 unsigned int flags, 1933 long timeout) 1934 { 1935 const int state = flags & I915_WAIT_INTERRUPTIBLE ? 1936 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1937 struct request_wait wait; 1938 1939 might_sleep(); 1940 GEM_BUG_ON(timeout < 0); 1941 1942 if (dma_fence_is_signaled(&rq->fence)) 1943 return timeout ?: 1; 1944 1945 if (!timeout) 1946 return -ETIME; 1947 1948 trace_i915_request_wait_begin(rq, flags); 1949 1950 /* 1951 * We must never wait on the GPU while holding a lock as we 1952 * may need to perform a GPU reset. So while we don't need to 1953 * serialise wait/reset with an explicit lock, we do want 1954 * lockdep to detect potential dependency cycles. 1955 */ 1956 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_); 1957 1958 /* 1959 * Optimistic spin before touching IRQs. 1960 * 1961 * We may use a rather large value here to offset the penalty of 1962 * switching away from the active task. Frequently, the client will 1963 * wait upon an old swapbuffer to throttle itself to remain within a 1964 * frame of the gpu. If the client is running in lockstep with the gpu, 1965 * then it should not be waiting long at all, and a sleep now will incur 1966 * extra scheduler latency in producing the next frame. To try to 1967 * avoid adding the cost of enabling/disabling the interrupt to the 1968 * short wait, we first spin to see if the request would have completed 1969 * in the time taken to set up the interrupt. 1970 * 1971 * We need up to 5us to enable the irq, and up to 20us to hide the 1972 * scheduler latency of a context switch, ignoring the secondary 1973 * impacts from a context switch such as cache eviction. 1974 * 1975 * The scheme used for low-latency IO is called "hybrid interrupt 1976 * polling". The suggestion there is to sleep until just before you 1977 * expect to be woken by the device interrupt and then poll for its 1978 * completion.
That requires having a good predictor for the request 1979 * duration, which we currently lack. 1980 */ 1981 if (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT && 1982 __i915_spin_request(rq, state)) 1983 goto out; 1984 1985 /* 1986 * This client is about to stall waiting for the GPU. In many cases 1987 * this is undesirable and limits the throughput of the system, as 1988 * many clients cannot continue processing user input/output whilst 1989 * blocked. RPS autotuning may take tens of milliseconds to respond 1990 * to the GPU load and thus incurs additional latency for the client. 1991 * We can circumvent that by promoting the GPU frequency to maximum 1992 * before we sleep. This makes the GPU throttle up much more quickly 1993 * (good for benchmarks and user experience, e.g. window animations), 1994 * but at a cost of spending more power processing the workload 1995 * (bad for battery). 1996 */ 1997 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq)) 1998 intel_rps_boost(rq); 1999 2000 wait.tsk = current; 2001 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) 2002 goto out; 2003 2004 /* 2005 * Flush the submission tasklet, but only if it may help this request. 2006 * 2007 * We sometimes experience some latency between the HW interrupts and 2008 * tasklet execution (mostly due to ksoftirqd latency, but it can also 2009 * be due to lazy CS events), so let's run the tasklet manually if there 2010 * is a chance it may submit this request. If the request is not ready 2011 * to run, as it is waiting for other fences to be signaled, flushing 2012 * the tasklet is busy work without any advantage for this client. 2013 * 2014 * If the HW is being lazy, this is the last chance before we go to 2015 * sleep to catch any pending events. We will check periodically in 2016 * the heartbeat to flush the submission tasklets as a last resort 2017 * for unhappy HW. 2018 */ 2019 if (i915_request_is_ready(rq)) 2020 __intel_engine_flush_submission(rq->engine, false); 2021 2022 for (;;) { 2023 set_current_state(state); 2024 2025 if (dma_fence_is_signaled(&rq->fence)) 2026 break; 2027 2028 if (signal_pending_state(state, current)) { 2029 timeout = -ERESTARTSYS; 2030 break; 2031 } 2032 2033 if (!timeout) { 2034 timeout = -ETIME; 2035 break; 2036 } 2037 2038 timeout = io_schedule_timeout(timeout); 2039 } 2040 __set_current_state(TASK_RUNNING); 2041 2042 if (READ_ONCE(wait.tsk)) 2043 dma_fence_remove_callback(&rq->fence, &wait.cb); 2044 GEM_BUG_ON(!list_empty(&wait.cb.node)); 2045 2046 out: 2047 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); 2048 trace_i915_request_wait_end(rq); 2049 return timeout; 2050 } 2051 2052 /** 2053 * i915_request_wait - wait until execution of request has finished 2054 * @rq: the request to wait upon 2055 * @flags: how to wait 2056 * @timeout: how long to wait in jiffies 2057 * 2058 * i915_request_wait() waits for the request to be completed, for a 2059 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an 2060 * unbounded wait). 2061 * 2062 * Returns the remaining time (in jiffies) if the request completed, or 2063 * -ETIME if the request is unfinished after the timeout expires. 2064 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is 2065 * pending before the request completes. 2066 * 2067 * NOTE: This function behaves differently from dma-fence wait semantics for 2068 * timeout = 0. It returns 0 on success, and -ETIME if not signaled; an illustrative usage sketch follows at the end of this file.
2069 */ 2070 long i915_request_wait(struct i915_request *rq, 2071 unsigned int flags, 2072 long timeout) 2073 { 2074 long ret = i915_request_wait_timeout(rq, flags, timeout); 2075 2076 if (!ret) 2077 return -ETIME; 2078 2079 if (ret > 0 && !timeout) 2080 return 0; 2081 2082 return ret; 2083 } 2084 2085 static int print_sched_attr(const struct i915_sched_attr *attr, 2086 char *buf, int x, int len) 2087 { 2088 if (attr->priority == I915_PRIORITY_INVALID) 2089 return x; 2090 2091 x += snprintf(buf + x, len - x, 2092 " prio=%d", attr->priority); 2093 2094 return x; 2095 } 2096 2097 static char queue_status(const struct i915_request *rq) 2098 { 2099 if (i915_request_is_active(rq)) 2100 return 'E'; 2101 2102 if (i915_request_is_ready(rq)) 2103 return intel_engine_is_virtual(rq->engine) ? 'V' : 'R'; 2104 2105 return 'U'; 2106 } 2107 2108 static const char *run_status(const struct i915_request *rq) 2109 { 2110 if (__i915_request_is_complete(rq)) 2111 return "!"; 2112 2113 if (__i915_request_has_started(rq)) 2114 return "*"; 2115 2116 if (!i915_sw_fence_signaled(&rq->semaphore)) 2117 return "&"; 2118 2119 return ""; 2120 } 2121 2122 static const char *fence_status(const struct i915_request *rq) 2123 { 2124 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) 2125 return "+"; 2126 2127 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) 2128 return "-"; 2129 2130 return ""; 2131 } 2132 2133 void i915_request_show(struct drm_printer *m, 2134 const struct i915_request *rq, 2135 const char *prefix, 2136 int indent) 2137 { 2138 const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence); 2139 char buf[80] = ""; 2140 int x = 0; 2141 2142 /* 2143 * The prefix is used to show the queue status, for which we use 2144 * the following flags: 2145 * 2146 * U [Unready] 2147 * - initial status upon being submitted by the user 2148 * 2149 * - the request is not ready for execution as it is waiting 2150 * for external fences 2151 * 2152 * R [Ready] 2153 * - all fences the request was waiting on have been signaled, 2154 * and the request is now ready for execution and will be 2155 * in a backend queue 2156 * 2157 * - a ready request may still need to wait on semaphores 2158 * [internal fences] 2159 * 2160 * V [Ready/virtual] 2161 * - same as ready, but queued over multiple backends 2162 * 2163 * E [Executing] 2164 * - the request has been transferred from the backend queue and 2165 * submitted for execution on HW 2166 * 2167 * - a completed request may still be regarded as executing, its 2168 * status may not be updated until it is retired and removed 2169 * from the lists 2170 */ 2171 2172 x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf)); 2173 2174 drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n", 2175 prefix, indent, " ", 2176 queue_status(rq), 2177 rq->fence.context, rq->fence.seqno, 2178 run_status(rq), 2179 fence_status(rq), 2180 buf, 2181 jiffies_to_msecs(jiffies - rq->emitted_jiffies), 2182 name); 2183 } 2184 2185 static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq) 2186 { 2187 u32 ring = ENGINE_READ(engine, RING_START); 2188 2189 return ring == i915_ggtt_offset(rq->ring->vma); 2190 } 2191 2192 static bool match_ring(struct i915_request *rq) 2193 { 2194 struct intel_engine_cs *engine; 2195 bool found; 2196 int i; 2197 2198 if (!intel_engine_is_virtual(rq->engine)) 2199 return engine_match_ring(rq->engine, rq); 2200 2201 found = false; 2202 i = 0; 2203 while ((engine = intel_engine_get_sibling(rq->engine, i++))) { 
2204 found = engine_match_ring(engine, rq); 2205 if (found) 2206 break; 2207 } 2208 2209 return found; 2210 } 2211 2212 enum i915_request_state i915_test_request_state(struct i915_request *rq) 2213 { 2214 if (i915_request_completed(rq)) 2215 return I915_REQUEST_COMPLETE; 2216 2217 if (!i915_request_started(rq)) 2218 return I915_REQUEST_PENDING; 2219 2220 if (match_ring(rq)) 2221 return I915_REQUEST_ACTIVE; 2222 2223 return I915_REQUEST_QUEUED; 2224 } 2225 2226 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2227 #include "selftests/mock_request.c" 2228 #include "selftests/i915_request.c" 2229 #endif 2230 2231 void i915_request_module_exit(void) 2232 { 2233 kmem_cache_destroy(slab_execute_cbs); 2234 kmem_cache_destroy(slab_requests); 2235 } 2236 2237 int __init i915_request_module_init(void) 2238 { 2239 slab_requests = 2240 kmem_cache_create("i915_request", 2241 sizeof(struct i915_request), 2242 __alignof__(struct i915_request), 2243 SLAB_HWCACHE_ALIGN | 2244 SLAB_RECLAIM_ACCOUNT | 2245 SLAB_TYPESAFE_BY_RCU, 2246 __i915_request_ctor); 2247 if (!slab_requests) 2248 return -ENOMEM; 2249 2250 slab_execute_cbs = KMEM_CACHE(execute_cb, 2251 SLAB_HWCACHE_ALIGN | 2252 SLAB_RECLAIM_ACCOUNT | 2253 SLAB_TYPESAFE_BY_RCU); 2254 if (!slab_execute_cbs) 2255 goto err_requests; 2256 2257 return 0; 2258 2259 err_requests: 2260 kmem_cache_destroy(slab_requests); 2261 return -ENOMEM; 2262 } 2263
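/*
 * Illustrative sketch (not part of the driver): the shape of a typical
 * submission built on the helpers above. The request "rq", the object "obj"
 * and the emit_commands() helper are hypothetical; the point is only the
 * ordering of the calls. i915_request_add() expects the timeline mutex held
 * from request creation and releases it, and, per the NB above
 * __i915_request_commit(), a created request must still be added so that it
 * is tracked through to completion even if an earlier step failed.
 *
 *	int err;
 *
 *	err = i915_request_await_object(rq, obj, true);
 *	if (err == 0)
 *		err = emit_commands(rq);
 *	i915_request_add(rq);
 */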
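/*
 * Illustrative sketch (not part of the driver): how a caller might use the
 * two wait interfaces above, highlighting their differing timeout == 0
 * semantics. The request "rq" and the flags chosen here are hypothetical.
 *
 * With dma-fence semantics, a zero timeout reports the current state:
 * 1 if the request is already signaled, -ETIME otherwise.
 *
 *	long ret;
 *
 *	ret = i915_request_wait_timeout(rq, I915_WAIT_INTERRUPTIBLE, 0);
 *
 * With i915_request_wait(), a zero timeout instead returns 0 for a completed
 * request and -ETIME for an unfinished one, while a non-zero timeout returns
 * the remaining jiffies on completion, -ETIME on expiry, or a negative error
 * code if a signal is pending (see the kerneldoc above).
 *
 *	ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
 */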