/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/dma-fence-array.h>
#include <linux/irq_work.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_ring.h"
#include "gt/intel_rps.h"

#include "i915_active.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_pm.h"

struct execute_cb {
	struct list_head link;
	struct irq_work work;
	struct i915_sw_fence *fence;
	void (*hook)(struct i915_request *rq, struct dma_fence *signal);
	struct i915_request *signal;
};

static struct i915_global_request {
	struct i915_global base;
	struct kmem_cache *slab_requests;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_execute_cbs;
} global;

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->gem_context->name ?: "[i915]";
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence),
				 interruptible | I915_WAIT_PRIORITY,
				 timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);
	i915_sw_fence_fini(&rq->semaphore);

	kmem_cache_free(global.slab_requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
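
/*
 * Illustrative aside (not part of the driver): a consumer outside i915 that
 * receives one of these fences (e.g. via a sync_file or a dma_resv) only
 * ever goes through the generic dma_fence API, so the i915 specifics stay
 * hidden behind i915_fence_ops above. A minimal sketch:
 *
 *	long ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret == 0)
 *		... timed out, the request has not yet signaled ...
 *
 * The callback path (dma_fence_add_callback) ends up in
 * i915_fence_enable_signaling(), which arms the breadcrumb interrupt.
 */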

static void irq_execute_cb(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	i915_sw_fence_complete(cb->fence);
	kmem_cache_free(global.slab_execute_cbs, cb);
}

static void irq_execute_cb_hook(struct irq_work *wrk)
{
	struct execute_cb *cb = container_of(wrk, typeof(*cb), work);

	cb->hook(container_of(cb->fence, struct i915_request, submit),
		 &cb->signal->fence);
	i915_request_put(cb->signal);

	irq_execute_cb(wrk);
}

static void __notify_execute_cb(struct i915_request *rq)
{
	struct execute_cb *cb;

	lockdep_assert_held(&rq->lock);

	if (list_empty(&rq->execute_cb))
		return;

	list_for_each_entry(cb, &rq->execute_cb, link)
		irq_work_queue(&cb->work);

	/*
	 * XXX Rollback on __i915_request_unsubmit()
	 *
	 * In the future, perhaps when we have an active time-slicing scheduler,
	 * it will be interesting to unsubmit parallel execution and remove
	 * busywaits from the GPU until their master is restarted. This is
	 * quite hairy, we have to carefully rollback the fence and do a
	 * preempt-to-idle cycle on the target engine, all the while the
	 * master execute_cb may refire.
	 */
	INIT_LIST_HEAD(&rq->execute_cb);
}

static inline void
remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	if (!READ_ONCE(request->file_priv))
		return;

	rcu_read_lock();
	file_priv = xchg(&request->file_priv, NULL);
	if (file_priv) {
		spin_lock(&file_priv->mm.lock);
		list_del(&request->client_link);
		spin_unlock(&file_priv->mm.lock);
	}
	rcu_read_unlock();
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}
	list_del(&rq->sched.link);
	spin_unlock_irq(&locked->active.lock);
}

bool i915_request_retire(struct i915_request *rq)
{
	if (!i915_request_completed(rq))
		return false;

	GEM_TRACE("%s fence %llx:%lld, current %d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  hwsp_seqno(rq));

	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
	trace_i915_request_retire(rq);

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&rq->link,
				  &i915_request_timeline(rq)->requests));
	rq->ring->head = rq->postfix;

	/*
	 * We only loosely track inflight requests across preemption,
	 * and so we may find ourselves attempting to retire a _completed_
	 * request that we have removed from the HW and put back on a run
	 * queue.
	 */
	remove_from_engine(rq);

	spin_lock_irq(&rq->lock);
	i915_request_mark_complete(rq);
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_cancel_breadcrumb(rq);
	if (i915_request_has_waitboost(rq)) {
		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
		atomic_dec(&rq->engine->gt->rps.num_waiters);
	}
	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
		__notify_execute_cb(rq);
	}
	GEM_BUG_ON(!list_empty(&rq->execute_cb));
	spin_unlock_irq(&rq->lock);

	remove_from_client(rq);
	list_del(&rq->link);

	intel_context_exit(rq->hw_context);
	intel_context_unpin(rq->hw_context);

	free_capture_list(rq);
	i915_sched_node_fini(&rq->sched);
	i915_request_put(rq);

	return true;
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%lld, current %d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  hwsp_seqno(rq));

	GEM_BUG_ON(!i915_request_completed(rq));

	do {
		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
	} while (i915_request_retire(tmp) && tmp != rq);
}

static int
__i915_request_await_execution(struct i915_request *rq,
			       struct i915_request *signal,
			       void (*hook)(struct i915_request *rq,
					    struct dma_fence *signal),
			       gfp_t gfp)
{
	struct execute_cb *cb;

	if (i915_request_is_active(signal)) {
		if (hook)
			hook(rq, &signal->fence);
		return 0;
	}

	cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
	if (!cb)
		return -ENOMEM;

	cb->fence = &rq->submit;
	i915_sw_fence_await(cb->fence);
	init_irq_work(&cb->work, irq_execute_cb);

	if (hook) {
		cb->hook = hook;
		cb->signal = i915_request_get(signal);
		cb->work.func = irq_execute_cb_hook;
	}

	spin_lock_irq(&signal->lock);
	if (i915_request_is_active(signal)) {
		if (hook) {
			hook(rq, &signal->fence);
			i915_request_put(signal);
		}
		i915_sw_fence_complete(cb->fence);
		kmem_cache_free(global.slab_execute_cbs, cb);
	} else {
		list_add_tail(&cb->link, &signal->execute_cb);
	}
	spin_unlock_irq(&signal->lock);

	return 0;
}
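
/*
 * A rough illustration of the execute_cb lifecycle used above (names taken
 * from this file, the flow itself is only sketched): while @signal has not
 * yet reached the HW, @rq holds an extra await on its own submit fence;
 * when @signal is submitted, __notify_execute_cb() queues the irq_work,
 * which drops that await (and runs the optional hook) from irq_execute_cb():
 *
 *	i915_sw_fence_await(&rq->submit)	taken in __i915_request_await_execution()
 *	...
 *	__notify_execute_cb(signal)		on submission of @signal
 *	  irq_work_queue(&cb->work)
 *	    irq_execute_cb():
 *	      i915_sw_fence_complete(&rq->submit)
 */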

bool __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	bool result = false;

	GEM_TRACE("%s fence %llx:%lld, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  hwsp_seqno(request));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * With the advent of preempt-to-busy, we frequently encounter
	 * requests that we have unsubmitted from HW, but left running
	 * until the next ack and so have completed in the meantime. On
	 * resubmission of that completed request, we can skip
	 * updating the payload, and execlists can even skip submitting
	 * the request.
	 *
	 * We must remove the request from the caller's priority queue,
	 * and the caller must only call us when the request is in their
	 * priority queue, under the active.lock. This ensures that the
	 * request has *not* yet been retired and we can safely move
	 * the request into the engine->active.list where it will be
	 * dropped upon retiring. (Otherwise, if we resubmit a *retired*
	 * request, this would be a horrible use-after-free.)
	 */
	if (i915_request_completed(request))
		goto xfer;

	if (i915_gem_context_is_banned(request->gem_context))
		i915_request_skip(request, -EIO);

	/*
	 * Are we using semaphores when the gpu is already saturated?
	 *
	 * Using semaphores incurs a cost in having the GPU poll a
	 * memory location, busywaiting for it to change. The continual
	 * memory reads can have a noticeable impact on the rest of the
	 * system with the extra bus traffic, stalling the cpu as it too
	 * tries to access memory across the bus (perf stat -e bus-cycles).
	 *
	 * If we installed a semaphore on this request and we only submit
	 * the request after the signaler completed, that indicates the
	 * system is overloaded and using semaphores at this time only
	 * increases the amount of work we are doing. If so, we disable
	 * further use of semaphores until we are idle again, whence we
	 * optimistically try again.
	 */
	if (request->sched.semaphores &&
	    i915_sw_fence_signaled(&request->semaphore))
		engine->saturated |= request->sched.semaphores;

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	trace_i915_request_execute(request);
	engine->serial++;
	result = true;

xfer:	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
		list_move_tail(&request->sched.link, &engine->active.requests);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_queue_breadcrumbs(engine);

	__notify_execute_cb(request);

	spin_unlock(&request->lock);

	return result;
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%lld, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  hwsp_seqno(request));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->active.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);

	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);

	spin_unlock(&request->lock);

	/* We've already spun, don't charge on resubmitting. */
	if (request->sched.semaphores && i915_request_started(request)) {
		request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
		request->sched.semaphores = 0;
	}

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->active.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);

		if (unlikely(fence->error))
			i915_request_skip(request, fence->error);

		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), semaphore);

	switch (state) {
	case FENCE_COMPLETE:
		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}
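
/*
 * Aside (descriptive only): both notify callbacks above follow the
 * i915_sw_fence contract used throughout this file - FENCE_COMPLETE fires
 * once every awaited fence has signaled (this is where the request is handed
 * to the backend, or its priority bumped), and FENCE_FREE is the final
 * callback when the sw_fence is torn down, balancing the i915_request_get()
 * taken for each sw_fence in __i915_request_create() below ("We bump the ref
 * for the fence chain").
 */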

static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

static noinline struct i915_request *
request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
{
	struct i915_request *rq;

	if (list_empty(&tl->requests))
		goto out;

	if (!gfpflags_allow_blocking(gfp))
		goto out;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	i915_request_retire(rq);

	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (rq)
		return rq;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&tl->requests, typeof(*rq), link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	retire_requests(tl);

out:
	return kmem_cache_alloc(global.slab_requests, gfp);
}

struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
	struct intel_timeline *tl = ce->timeline;
	struct i915_request *rq;
	u32 seqno;
	int ret;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	/* Check that the caller provided an already pinned context */
	__intel_context_pin(ce);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_active_request_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be - and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = request_alloc_slow(tl, gfp);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	ret = intel_timeline_get_seqno(tl, rq, &seqno);
	if (ret)
		goto err_free;

	rq->i915 = ce->engine->i915;
	rq->hw_context = ce;
	rq->gem_context = ce->gem_context;
	rq->engine = ce->engine;
	rq->ring = ce->ring;
	rq->execution_mask = ce->engine->mask;

	rcu_assign_pointer(rq->timeline, tl);
	rq->hwsp_seqno = tl->hwsp_seqno;
	rq->hwsp_cacheline = tl->hwsp_cacheline;

	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
		       tl->fence_context, seqno);

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
	i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->file_priv = NULL;
	rq->batch = NULL;
	rq->capture_list = NULL;
	rq->flags = 0;

	INIT_LIST_HEAD(&rq->execute_cb);

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space =
		2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = rq->engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	intel_context_mark_active(ce);
	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

err_free:
	kmem_cache_free(global.slab_requests, rq);
err_unreserve:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}
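
/*
 * Illustrative sketch of the RCU lookup the "dragons" comment above guards
 * against (the real implementation lives in __i915_active_request_get_rcu();
 * this only shows the shape of the SLAB_TYPESAFE_BY_RCU pattern, it is not a
 * copy of that code):
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(*active);
 *	if (rq && !kref_get_unless_zero(&rq->fence.refcount))
 *		rq = NULL;			freed: refcount already zero
 *	if (rq && rcu_dereference(*active) != rq) {
 *		i915_request_put(rq);		reallocated: restart the lookup
 *		rq = NULL;
 *	}
 *	rcu_read_unlock();
 *
 * Hence the request must never be zeroed on allocation: a concurrent lookup
 * may be chasing rq->engine or reading rq->fence.seqno at any time.
 */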

struct i915_request *
i915_request_create(struct intel_context *ce)
{
	struct i915_request *rq;
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return ERR_CAST(tl);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&tl->requests, typeof(*rq), link);
	if (!list_is_last(&rq->link, &tl->requests))
		i915_request_retire(rq);

	intel_context_enter(ce);
	rq = __i915_request_create(ce, GFP_KERNEL);
	intel_context_exit(ce); /* active reference transferred to request */
	if (IS_ERR(rq))
		goto err_unlock;

	/* Check that we do not interrupt ourselves with a new request */
	rq->cookie = lockdep_pin_lock(&tl->mutex);

	return rq;

err_unlock:
	intel_context_timeline_unlock(tl);
	return rq;
}
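
/*
 * Typical caller flow (a condensed sketch, not lifted from any real caller;
 * the commands emitted between begin/advance depend entirely on the caller):
 *
 *	rq = i915_request_create(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_object(rq, obj, false);
 *	if (err == 0) {
 *		cs = intel_ring_begin(rq, 4);
 *		if (!IS_ERR(cs)) {
 *			... emit up to 4 dwords of commands ...
 *			intel_ring_advance(rq, cs);
 *		}
 *	}
 *
 *	i915_request_add(rq);
 *
 * Callers typically add the request even after an error, since
 * i915_request_add() is what releases the timeline lock taken by
 * i915_request_create() above.
 */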

static int
i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
{
	struct intel_timeline *tl;
	struct dma_fence *fence;
	int err;

	GEM_BUG_ON(i915_request_timeline(rq) ==
		   rcu_access_pointer(signal->timeline));

	rcu_read_lock();
	tl = rcu_dereference(signal->timeline);
	if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();
	if (!tl) /* already started or maybe even completed */
		return 0;

	fence = ERR_PTR(-EBUSY);
	if (mutex_trylock(&tl->mutex)) {
		fence = NULL;
		if (!i915_request_started(signal) &&
		    !list_is_first(&signal->link, &tl->requests)) {
			signal = list_prev_entry(signal, link);
			fence = dma_fence_get(&signal->fence);
		}
		mutex_unlock(&tl->mutex);
	}
	intel_timeline_put(tl);
	if (IS_ERR_OR_NULL(fence))
		return PTR_ERR_OR_ZERO(fence);

	err = 0;
	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
		err = i915_sw_fence_await_dma_fence(&rq->submit,
						    fence, 0,
						    I915_FENCE_GFP);
	dma_fence_put(fence);

	return err;
}

static intel_engine_mask_t
already_busywaiting(struct i915_request *rq)
{
	/*
	 * Polling a semaphore causes bus traffic, delaying other users of
	 * both the GPU and CPU. We want to limit the impact on others,
	 * while taking advantage of early submission to reduce GPU
	 * latency. Therefore we restrict ourselves to not using more
	 * than one semaphore from each source, and not using a semaphore
	 * if we have detected the engine is saturated (i.e. would not be
	 * submitted early and cause bus traffic reading an already passed
	 * semaphore).
	 *
	 * See the are-we-too-late? check in __i915_request_submit().
	 */
	return rq->sched.semaphores | rq->engine->saturated;
}
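
/*
 * Example of the mask arithmetic above (engine numbering is illustrative):
 * if this request has already emitted a semaphore poll on vcs0
 * (rq->sched.semaphores == BIT(VCS0)) and earlier requests on this engine
 * found their semaphores towards rcs0 to be too late
 * (engine->saturated == BIT(RCS0)), the returned mask BIT(VCS0) | BIT(RCS0)
 * tells emit_semaphore_wait() below to fall back to a plain fence wait for
 * signalers running on either of those engines.
 */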

static int
emit_semaphore_wait(struct i915_request *to,
		    struct i915_request *from,
		    gfp_t gfp)
{
	const int has_token = INTEL_GEN(to->i915) >= 12;
	u32 hwsp_offset;
	int len;
	u32 *cs;

	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);

	/* Just emit the first semaphore we see as request space is limited. */
	if (already_busywaiting(to) & from->engine->mask)
		goto await_fence;

	if (i915_request_await_start(to, from) < 0)
		goto await_fence;

	/* Only submit our spinner after the signaler is running! */
	if (__i915_request_await_execution(to, from, NULL, gfp))
		goto await_fence;

	/* We need to pin the signaler's HWSP until we are finished reading. */
	if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
		goto await_fence;

	len = 4;
	if (has_token)
		len += 2;

	cs = intel_ring_begin(to, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Using greater-than-or-equal here means we have to worry
	 * about seqno wraparound. To side step that issue, we swap
	 * the timeline HWSP upon wrapping, so that everyone listening
	 * for the old (pre-wrap) values does not see the much smaller
	 * (post-wrap) values than they were expecting (and so wait
	 * forever).
	 */
	*cs++ = (MI_SEMAPHORE_WAIT |
		 MI_SEMAPHORE_GLOBAL_GTT |
		 MI_SEMAPHORE_POLL |
		 MI_SEMAPHORE_SAD_GTE_SDD) +
		has_token;
	*cs++ = from->fence.seqno;
	*cs++ = hwsp_offset;
	*cs++ = 0;
	if (has_token) {
		*cs++ = 0;
		*cs++ = MI_NOOP;
	}

	intel_ring_advance(to, cs);
	to->sched.semaphores |= from->engine->mask;
	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
	return 0;

await_fence:
	return i915_sw_fence_await_dma_fence(&to->submit,
					     &from->fence, 0,
					     I915_FENCE_GFP);
}

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
	} else if (intel_engine_has_semaphores(to->engine) &&
		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
	} else {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    I915_FENCE_GFP);
	}
	if (ret < 0)
		return ret;

	if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
		ret = i915_sw_fence_await_dma_fence(&to->semaphore,
						    &from->fence, 0,
						    I915_FENCE_GFP);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context &&
		    intel_timeline_sync_is_later(i915_request_timeline(rq),
						 fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    fence->context ? I915_FENCE_TIMEOUT : 0,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context)
			intel_timeline_sync_set(i915_request_timeline(rq),
						fence);
	} while (--nchild);

	return 0;
}

int
i915_request_await_execution(struct i915_request *rq,
			     struct dma_fence *fence,
			     void (*hook)(struct i915_request *rq,
					  struct dma_fence *signal))
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		/* XXX Error for signal-on-any fence arrays */

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * We don't squash repeated fence dependencies here as we
		 * want to run our callback in all cases.
		 */

		if (dma_fence_is_i915(fence))
			ret = __i915_request_await_execution(rq,
							     to_request(fence),
							     hook,
							     I915_FENCE_GFP);
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    GFP_KERNEL);
		if (ret < 0)
			return ret;
	} while (--nchild);

	return 0;
}
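
/*
 * Hook usage sketch (the callback name is hypothetical; the hook runs either
 * inline, if the signaler is already executing, or later from irq_work
 * context once the signaler is submitted - see
 * __i915_request_await_execution()):
 *
 *	static void note_signaler_submitted(struct i915_request *rq,
 *					    struct dma_fence *signal)
 *	{
 *		... e.g. record which engine the signaler ended up on ...
 *	}
 *
 *	err = i915_request_await_execution(rq, &signaler->fence,
 *					   note_signaler_submitted);
 */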

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(obj->base.resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}
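
/*
 * Usage sketch (request and object names are illustrative): a request that
 * will only read @obj needs to wait for the exclusive (write) fence, while a
 * request that will write @obj must also wait for every shared (read) fence,
 * exactly as implemented above:
 *
 *	err = i915_request_await_object(rq, obj, false);	we only read obj
 *	err = i915_request_await_object(rq, obj, true);		we write obj
 */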

void i915_request_skip(struct i915_request *rq, int error)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	if (rq->infix == rq->postfix)
		return;

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, rq->postfix - head);
	rq->infix = rq->postfix;
}

static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
	struct intel_timeline *timeline = i915_request_timeline(rq);
	struct i915_request *prev;

	/*
	 * Dependency tracking and request ordering along the timeline
	 * is special cased so that we can eliminate redundant ordering
	 * operations while building the request (we know that the timeline
	 * itself is ordered, and here we guarantee it).
	 *
	 * As we know we will need to emit tracking along the timeline,
	 * we embed the hooks into our request struct -- at the cost of
	 * having to have specialised no-allocation interfaces (which will
	 * be beneficial elsewhere).
	 *
	 * A second benefit to open-coding i915_request_await_request is
	 * that we can apply a slight variant of the rules specialised
	 * for timelines that jump between engines (such as virtual engines).
	 * If we consider the case of virtual engine, we must emit a dma-fence
	 * to prevent scheduling of the second request until the first is
	 * complete (to maximise our greedy late load balancing) and this
	 * precludes optimising to use semaphores serialisation of a single
	 * timeline across engines.
	 */
	prev = to_request(__i915_active_fence_set(&timeline->last_request,
						  &rq->fence));
	if (prev && !i915_request_completed(prev)) {
		if (is_power_of_2(prev->engine->mask | rq->engine->mask))
			i915_sw_fence_await_sw_fence(&rq->submit,
						     &prev->submit,
						     &rq->submitq);
		else
			__i915_sw_fence_await_dma_fence(&rq->submit,
							&prev->fence,
							&rq->dmaq);
		if (rq->engine->schedule)
			__i915_sched_node_add_dependency(&rq->sched,
							 &prev->sched,
							 &rq->dep,
							 0);
	}

	list_add_tail(&rq->link, &timeline->requests);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != rq->fence.seqno);

	return prev;
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
struct i915_request *__i915_request_commit(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_ring *ring = rq->ring;
	u32 *cs;

	GEM_TRACE("%s fence %llx:%lld\n",
		  engine->name, rq->fence.context, rq->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	GEM_BUG_ON(rq->reserved_space > ring->space);
	rq->reserved_space = 0;
	rq->emitted_jiffies = jiffies;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	rq->postfix = intel_ring_offset(rq, cs);

	return __i915_request_add_to_timeline(rq);
}
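
/*
 * Worked example of the reservation that makes the GEM_BUG_ON above hold
 * (the dword count is illustrative; the real value comes from the engine's
 * emit_fini_breadcrumb_dw): with a 16-dword breadcrumb,
 * __i915_request_create() reserved 2 * 16 * sizeof(u32) = 128 bytes, twice
 * what intel_ring_begin() consumes here, so that the breadcrumb still fits
 * even if the ring has to wrap back to its start.
 */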

void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr)
{
	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	i915_sw_fence_commit(&rq->semaphore);
	if (attr && rq->engine->schedule)
		rq->engine->schedule(rq, attr);
	i915_sw_fence_commit(&rq->submit);
}

void i915_request_add(struct i915_request *rq)
{
	struct i915_sched_attr attr = rq->gem_context->sched;
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/*
	 * Boost actual workloads past semaphores!
	 *
	 * With semaphores we spin on one engine waiting for another,
	 * simply to reduce the latency of starting our work when
	 * the signaler completes. However, if there is any other
	 * work that we could be doing on this engine instead, that
	 * is better utilisation and will reduce the overall duration
	 * of the current work. To avoid PI boosting a semaphore
	 * far in the distant past over useful work, we keep a history
	 * of any semaphore use along our dependency chain.
	 */
	if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
		attr.priority |= I915_PRIORITY_NOSEMAPHORE;

	/*
	 * Boost priorities to new clients (new request flows).
	 *
	 * Allow interactive/synchronous clients to jump ahead of
	 * the bulk clients. (FQ_CODEL)
	 */
	if (list_empty(&rq->sched.signalers_list))
		attr.priority |= I915_PRIORITY_WAIT;

	local_bh_disable();
	__i915_request_queue(rq, &attr);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev &&
	    i915_request_completed(prev) &&
	    rcu_access_pointer(prev->timeline) == tl)
		i915_request_retire_upto(prev);

	mutex_unlock(&tl->mutex);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static bool __i915_spin_request(const struct i915_request * const rq,
				int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the context and so we can
	 * tell if the request has been started. If the request is not even
	 * running yet, it is a fair assumption that it will not complete
	 * within our relatively short timeout.
	 */
	if (!i915_request_is_running(rq))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quick as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_request_completed(rq))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

struct request_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct request_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(wait->tsk);
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct request_wait wait;

	might_sleep();
	GEM_BUG_ON(timeout < 0);

	if (dma_fence_is_signaled(&rq->fence))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	/*
	 * We must never wait on the GPU while holding a lock as we
	 * may need to perform a GPU reset. So while we don't need to
	 * serialise wait/reset with an explicit lock, we do want
	 * lockdep to detect potential dependency cycles.
	 */
	mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);

	/*
	 * Optimistic spin before touching IRQs.
	 *
	 * We may use a rather large value here to offset the penalty of
	 * switching away from the active task. Frequently, the client will
	 * wait upon an old swapbuffer to throttle itself to remain within a
	 * frame of the gpu. If the client is running in lockstep with the gpu,
	 * then it should not be waiting long at all, and a sleep now will incur
	 * extra scheduler latency in producing the next frame. To try to
	 * avoid adding the cost of enabling/disabling the interrupt to the
	 * short wait, we first spin to see if the request would have completed
	 * in the time taken to setup the interrupt.
	 *
	 * We need up to 5us to enable the irq, and up to 20us to hide the
	 * scheduler latency of a context switch, ignoring the secondary
	 * impacts from a context switch such as cache eviction.
	 *
	 * The scheme used for low-latency IO is called "hybrid interrupt
	 * polling". The suggestion there is to sleep until just before you
	 * expect to be woken by the device interrupt and then poll for its
	 * completion. That requires having a good predictor for the request
	 * duration, which we currently lack.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
	    __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
		dma_fence_signal(&rq->fence);
		goto out;
	}

	/*
	 * This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we sleep. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery).
	 */
	if (flags & I915_WAIT_PRIORITY) {
		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
			intel_rps_boost(rq);
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
	}

	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out;

	for (;;) {
		set_current_state(state);

		if (i915_request_completed(rq)) {
			dma_fence_signal(&rq->fence);
			break;
		}

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		intel_engine_flush_submission(rq->engine);
		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(&rq->fence, &wait.cb);

out:
	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
	trace_i915_request_wait_end(rq);
	return timeout;
}
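
/*
 * Usage sketch (values are illustrative): a bounded, interruptible wait of
 * up to 100ms on a request, interpreting the return value documented above:
 *
 *	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				    msecs_to_jiffies(100));
 *	if (timeout < 0)
 *		return timeout;		timed out or interrupted
 *	... otherwise the request completed with 'timeout' jiffies to spare ...
 */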

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif

static void i915_global_request_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_execute_cbs);
	kmem_cache_shrink(global.slab_requests);
}

static void i915_global_request_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_execute_cbs);
	kmem_cache_destroy(global.slab_requests);
}

static struct i915_global_request global = { {
	.shrink = i915_global_request_shrink,
	.exit = i915_global_request_exit,
} };

int __init i915_global_request_init(void)
{
	global.slab_requests = KMEM_CACHE(i915_request,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_RECLAIM_ACCOUNT |
					  SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_requests)
		return -ENOMEM;

	global.slab_execute_cbs = KMEM_CACHE(execute_cb,
					     SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_TYPESAFE_BY_RCU);
	if (!global.slab_execute_cbs)
		goto err_requests;

	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN |
					      SLAB_RECLAIM_ACCOUNT);
	if (!global.slab_dependencies)
		goto err_execute_cbs;

	i915_global_register(&global.base);
	return 0;

err_execute_cbs:
	kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
	kmem_cache_destroy(global.slab_requests);
	return -ENOMEM;
}