/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"
#include "i915_reset.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);

	kmem_cache_free(rq->i915->requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

static inline void
i915_request_remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static void reserve_gt(struct drm_i915_private *i915)
{
	if (!i915->gt.active_requests++)
		i915_gem_unpark(i915);
}

static void unreserve_gt(struct drm_i915_private *i915)
{
	GEM_BUG_ON(!i915->gt.active_requests);
	if (!--i915->gt.active_requests)
		i915_gem_park(i915);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct i915_request *request)
{
	/* Space left intentionally blank */
}

static void advance_ring(struct i915_request *request)
{
	struct intel_ring *ring = request->ring;
	unsigned int tail;

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
	if (list_is_last(&request->ring_link, &ring->request_list)) {
		/*
		 * We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		 */
		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
		tail = READ_ONCE(request->tail);
		list_del(&ring->active_link);
	} else {
		tail = request->postfix;
	}
	list_del_init(&request->ring_link);

	ring->head = tail;
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void __retire_engine_request(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
		  __func__, engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  hwsp_seqno(rq),
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!i915_request_completed(rq));

	local_irq_disable();

	spin_lock(&engine->timeline.lock);
	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
	list_del_init(&rq->link);
	spin_unlock(&engine->timeline.lock);

	spin_lock(&rq->lock);
	i915_request_mark_complete(rq);
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_cancel_breadcrumb(rq);
	if (rq->waitboost) {
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
	}
	spin_unlock(&rq->lock);

	local_irq_enable();

	/*
	 * The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context);
	engine->last_retired_context = rq->hw_context;
}

static void __retire_engine_upto(struct intel_engine_cs *engine,
				 struct i915_request *rq)
{
	struct i915_request *tmp;

	if (list_empty(&rq->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline.requests,
				       typeof(*tmp), link);

		GEM_BUG_ON(tmp->engine != engine);
		__retire_engine_request(engine, tmp);
	} while (tmp != rq);
}

static void i915_request_retire(struct i915_request *request)
{
	struct i915_gem_active *active, *next;

	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  hwsp_seqno(request),
		  intel_engine_get_seqno(request->engine));

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));

	trace_i915_request_retire(request);

	advance_ring(request);
	free_capture_list(request);

	/*
	 * Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/*
		 * In microbenchmarks, or when focusing upon time inside the
		 * kernel, we may spend an inordinate amount of time simply
		 * handling the retirement of requests and processing their
		 * callbacks. This loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->gem_context->ban_score);
	intel_context_unpin(request->hw_context);

	__retire_engine_upto(request->engine, request);

	unreserve_gt(request->i915);

	i915_sched_node_fini(request->i915, &request->sched);
	i915_request_put(request);
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_ring *ring = rq->ring;
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  hwsp_seqno(rq),
		  intel_engine_get_seqno(rq->engine));

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_request_completed(rq));

	if (list_empty(&rq->ring_link))
		return;

	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);

		i915_request_retire(tmp);
	} while (tmp != rq);
}

static u32 timeline_get_seqno(struct i915_timeline *tl)
{
	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void move_to_timeline(struct i915_request *request,
			     struct i915_timeline *timeline)
{
	GEM_BUG_ON(request->timeline == &request->engine->timeline);
	lockdep_assert_held(&request->engine->timeline.lock);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);
}

static u32 next_global_seqno(struct i915_timeline *tl)
{
	if (!++tl->seqno)
		++tl->seqno;
	return tl->seqno;
}

void __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	u32 seqno;

	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  engine->timeline.seqno + 1,
		  hwsp_seqno(request),
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	GEM_BUG_ON(request->global_seqno);

	seqno = next_global_seqno(&engine->timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(intel_engine_signaled(engine, seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	set_bit(I915_FENCE_FLAG_ACTIVE,
		&request->fence.flags);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_queue_breadcrumbs(engine);
	spin_unlock(&request->lock);

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	/* Transfer from per-context onto the global per-engine timeline */
	move_to_timeline(request, &engine->timeline);

	trace_i915_request_execute(request);
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  hwsp_seqno(request),
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);
	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	move_to_timeline(request, request->timeline);

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);
		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged().
		 * We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

static void ring_retire_requests(struct intel_ring *ring)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
		if (!i915_request_completed(rq))
			break;

		i915_request_retire(rq);
	}
}

static noinline struct i915_request *
i915_request_alloc_slow(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct i915_request *rq;

	if (list_empty(&ring->request_list))
		goto out;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	ring_retire_requests(ring);

out:
	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
}

/**
 * i915_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq;
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/*
	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	 */
	GEM_BUG_ON(ctx == i915->preempt_context);

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&i915->gpu_error))
		return ERR_PTR(-EIO);

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = intel_context_pin(ctx, engine);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	reserve_gt(i915);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
	    i915_request_completed(rq))
		i915_request_retire(rq);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete.
	 * Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(i915->requests,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = i915_request_alloc_slow(ce);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	rq->rcustate = get_state_synchronize_rcu();

	INIT_LIST_HEAD(&rq->active_list);
	rq->i915 = i915;
	rq->engine = engine;
	rq->gem_context = ctx;
	rq->hw_context = ce;
	rq->ring = ce->ring;
	rq->timeline = ce->ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);
	rq->hwsp_seqno = rq->timeline->hwsp_seqno;

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence,
		       &i915_fence_ops,
		       &rq->lock,
		       rq->timeline->fence_context,
		       timeline_get_seqno(rq->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
	rq->file_priv = NULL;
	rq->batch = NULL;
	rq->capture_list = NULL;
	rq->waitboost = false;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	/* Keep a second pin for the dual retirement along engine and ring */
	__intel_context_pin(ce);

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->active_list));
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

	kmem_cache_free(i915->requests, rq);
err_unreserve:
	unreserve_gt(i915);
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(to->i915,
						     &to->sched,
						     &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
	} else {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    I915_FENCE_GFP);
	}

	return ret < 0 ? ret : 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context != rq->i915->mm.unordered_timeline &&
		    i915_timeline_sync_is_later(rq->timeline, fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context != rq->i915->mm.unordered_timeline)
			i915_timeline_sync_set(rq->timeline, fence);
	} while (--nchild);

	return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

void i915_request_skip(struct i915_request *rq, int error)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, rq->postfix - head);
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void i915_request_add(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_timeline *timeline = request->timeline;
	struct intel_ring *ring = request->ring;
	struct i915_request *prev;
	u32 *cs;

	GEM_TRACE("%s fence %llx:%lld\n",
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	GEM_BUG_ON(request->reserved_space > request->ring->space);
	request->reserved_space = 0;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	/*
	 * Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_sched_node_add_dependency(&request->sched,
							 &prev->sched,
							 &request->dep,
							 0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	if (list_is_first(&request->ring_link, &ring->request_list)) {
		GEM_TRACE("marking %s as active\n", ring->timeline->name);
		list_add(&ring->active_link, &request->i915->gt.active_rings);
	}
	request->emitted_jiffies = jiffies;

	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule) {
		struct i915_sched_attr attr = request->gem_context->sched;

		/*
		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients.
		 * (FQ_CODEL)
		 */
		if (!prev || i915_request_completed(prev))
			attr.priority |= I915_PRIORITY_NEWCLIENT;

		engine->schedule(request, &attr);
	}
	rcu_read_unlock();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static bool __i915_spin_request(const struct i915_request * const rq,
				int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the context and so we can
	 * tell if the request has been started. If the request is not even
	 * running yet, it is a fair assumption that it will not complete
	 * within our relatively short timeout.
	 */
	if (!i915_request_is_running(rq))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate.
	 * By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_request_completed(rq))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

struct request_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct request_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(wait->tsk);
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags; conversely, if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct request_wait wait;

	might_sleep();
	GEM_BUG_ON(timeout < 0);

	if (i915_request_completed(rq))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(rq, state, 5))
		goto out;

	if (flags & I915_WAIT_PRIORITY)
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);

	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out;

	for (;;) {
		set_current_state(state);

		if (i915_request_completed(rq))
			break;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(&rq->fence, &wait.cb);

out:
	trace_i915_request_wait_end(rq);
	return timeout;
}

void i915_retire_requests(struct drm_i915_private *i915)
{
	struct intel_ring *ring, *tmp;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915->gt.active_requests)
		return;

	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
		ring_retire_requests(ring);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
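
#if 0
/*
 * Editor's note: a minimal, hedged sketch (compiled out via #if 0) of the
 * request lifecycle implemented above - allocate against a context, emit
 * commands, then add and wait - in the same spirit as the selftests included
 * above. The function name example_submit_empty_request() is hypothetical
 * and command emission/error unwinding are elided; it only strings together
 * the entry points defined in this file.
 */
static int example_submit_empty_request(struct intel_engine_cs *engine,
					struct i915_gem_context *ctx)
{
	struct i915_request *rq;
	long timeout;

	/*
	 * The caller is assumed to hold struct_mutex, as both
	 * i915_request_alloc() and i915_request_add() assert above.
	 */
	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/*
	 * Commands would be emitted here (e.g. via intel_ring_begin()),
	 * filling the space between rq->infix and the final breadcrumb.
	 */

	i915_request_get(rq);	/* keep a reference across the wait */
	i915_request_add(rq);	/* seal the request and queue it for execution */

	timeout = i915_request_wait(rq,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return timeout < 0 ? timeout : 0;
}
#endif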