/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return intel_engine_enable_signaling(to_request(fence), true);
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);

	kmem_cache_free(rq->i915->requests, rq);
}

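/*
 * The vtable hooking i915_request into the generic struct dma_fence
 * machinery; to_request() recovers the containing request from each fence.
 */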
const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

static inline void
i915_request_remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static struct i915_dependency *
i915_dependency_alloc(struct drm_i915_private *i915)
{
	return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct drm_i915_private *i915,
		     struct i915_dependency *dep)
{
	kmem_cache_free(i915->dependencies, dep);
}

static void
__i915_sched_node_add_dependency(struct i915_sched_node *node,
				 struct i915_sched_node *signal,
				 struct i915_dependency *dep,
				 unsigned long flags)
{
	INIT_LIST_HEAD(&dep->dfs_link);
	list_add(&dep->wait_link, &signal->waiters_list);
	list_add(&dep->signal_link, &node->signalers_list);
	dep->signaler = signal;
	dep->flags = flags;
}

static int
i915_sched_node_add_dependency(struct drm_i915_private *i915,
			       struct i915_sched_node *node,
			       struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc(i915);
	if (!dep)
		return -ENOMEM;

	__i915_sched_node_add_dependency(node, signal, dep,
					 I915_DEPENDENCY_ALLOC);
	return 0;
}

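/*
 * Each i915_dependency is linked onto two lists: the waiters_list of the
 * node that must signal first, and the signalers_list of the node that
 * waits upon it. i915_sched_node_fini() unlinks (and, for dynamically
 * allocated dependencies, frees) both directions when a node is retired.
 */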
static void
i915_sched_node_fini(struct drm_i915_private *i915,
		     struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(i915, dep);
	}
}

static void
i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
}

static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct intel_engine_cs *engine;
	struct i915_timeline *timeline;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	GEM_BUG_ON(i915->gt.active_requests);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
			  engine->name,
			  engine->timeline.seqno,
			  intel_engine_get_seqno(engine),
			  seqno);

		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
			/* Flush any waiters before we reuse the seqno */
			intel_engine_disarm_breadcrumbs(engine);
			intel_engine_init_hangcheck(engine);
			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
		}

		/* Check we are idle before we fiddle with hw state! */
		GEM_BUG_ON(!intel_engine_is_idle(engine));
		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));

		/* Finally reset hw state */
		intel_engine_init_global_seqno(engine, seqno);
		engine->timeline.seqno = seqno;
	}

	list_for_each_entry(timeline, &i915->gt.timelines, link)
		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));

	i915->gt.request_serial = seqno;

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *i915 = to_i915(dev);

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we will inject to ring */
	return reset_all_global_seqno(i915, seqno - 1);
}

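/*
 * reserve_gt()/unreserve_gt() bracket the lifetime of every request: the
 * first reservation unparks the GT and retiring the last outstanding
 * request parks it again. The serial is bumped for every request so that
 * an impending wrap of any engine's seqno is caught ahead of time.
 */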
static int reserve_gt(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Reservation is fine until we may need to wrap around
	 *
	 * By incrementing the serial for every request, we know that no
	 * individual engine may exceed that serial (as each is reset to 0
	 * on any wrap). This protects even the most pessimistic of migrations
	 * of every request from all engines onto just one.
	 */
	while (unlikely(++i915->gt.request_serial == 0)) {
		ret = reset_all_global_seqno(i915, 0);
		if (ret) {
			i915->gt.request_serial--;
			return ret;
		}
	}

	if (!i915->gt.active_requests++)
		i915_gem_unpark(i915);

	return 0;
}

static void unreserve_gt(struct drm_i915_private *i915)
{
	GEM_BUG_ON(!i915->gt.active_requests);
	if (!--i915->gt.active_requests)
		i915_gem_park(i915);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct i915_request *request)
{
	/* Space left intentionally blank */
}

static void advance_ring(struct i915_request *request)
{
	struct intel_ring *ring = request->ring;
	unsigned int tail;

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
	if (list_is_last(&request->ring_link, &ring->request_list)) {
		/*
		 * We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		 */
		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
		tail = READ_ONCE(request->tail);
		list_del(&ring->active_link);
	} else {
		tail = request->postfix;
	}
	list_del_init(&request->ring_link);

	ring->head = tail;
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

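/*
 * __retire_engine_request() removes a completed request from the engine
 * timeline, signals its fence and drops any waitboost it acquired. It must
 * be called in order along the engine timeline, oldest request first.
 */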
static void __retire_engine_request(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
		  __func__, engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!i915_request_completed(rq));

	local_irq_disable();

	spin_lock(&engine->timeline.lock);
	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
	list_del_init(&rq->link);
	spin_unlock(&engine->timeline.lock);

	spin_lock(&rq->lock);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		intel_engine_cancel_signaling(rq);
	if (rq->waitboost) {
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
	}
	spin_unlock(&rq->lock);

	local_irq_enable();

	/*
	 * The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context);
	engine->last_retired_context = rq->hw_context;
}

static void __retire_engine_upto(struct intel_engine_cs *engine,
				 struct i915_request *rq)
{
	struct i915_request *tmp;

	if (list_empty(&rq->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline.requests,
				       typeof(*tmp), link);

		GEM_BUG_ON(tmp->engine != engine);
		__retire_engine_request(engine, tmp);
	} while (tmp != rq);
}

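/*
 * i915_request_retire() releases everything a single completed request
 * still holds: it advances the ring, runs the retirement callbacks on the
 * request's active list, unpins the context and finally drops the
 * request's own reference.
 */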
static void i915_request_retire(struct i915_request *request)
{
	struct i915_gem_active *active, *next;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(request->engine));

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));

	trace_i915_request_retire(request);

	advance_ring(request);
	free_capture_list(request);

	/*
	 * Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/*
		 * In microbenchmarks, or when focusing upon time inside the
		 * kernel, we may spend an inordinate amount of time simply
		 * handling the retirement of requests and processing their
		 * callbacks. Of which, this loop itself is particularly hot
		 * due to the cache misses when jumping around the list of
		 * i915_gem_active. So we try to keep this loop as
		 * streamlined as possible and also prefetch the next
		 * i915_gem_active to try and hide the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->gem_context->ban_score);
	intel_context_unpin(request->hw_context);

	__retire_engine_upto(request->engine, request);

	unreserve_gt(request->i915);

	i915_sched_node_fini(request->i915, &request->sched);
	i915_request_put(request);
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_ring *ring = rq->ring;
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  intel_engine_get_seqno(rq->engine));

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_request_completed(rq));

	if (list_empty(&rq->ring_link))
		return;

	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);

		i915_request_retire(tmp);
	} while (tmp != rq);
}

static u32 timeline_get_seqno(struct i915_timeline *tl)
{
	return ++tl->seqno;
}

static void move_to_timeline(struct i915_request *request,
			     struct i915_timeline *timeline)
{
	GEM_BUG_ON(request->timeline == &request->engine->timeline);
	lockdep_assert_held(&request->engine->timeline.lock);

	spin_lock_nested(&request->timeline->lock, SINGLE_DEPTH_NESTING);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);
}

void __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	u32 seqno;

	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  engine->timeline.seqno + 1,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	GEM_BUG_ON(request->global_seqno);

	seqno = timeline_get_seqno(&engine->timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request, false);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	/* Transfer from per-context onto the global per-engine timeline */
	move_to_timeline(request, &engine->timeline);

	trace_i915_request_execute(request);

	wake_up_all(&request->execute);
}

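/*
 * __i915_request_submit() and __i915_request_unsubmit() require
 * engine->timeline.lock to be held with irqs disabled; the plain
 * i915_request_submit()/i915_request_unsubmit() variants are irqsave
 * wrappers for callers that do not already hold the lock.
 */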
void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine),
				     request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	move_to_timeline(request, request->timeline);

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);
		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

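/*
 * A typical request lifecycle, as a sketch (error handling and command
 * emission elided):
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = i915_request_await_object(rq, obj, false);
 *	... emit commands into rq->ring, e.g. via intel_ring_begin() ...
 *	i915_request_add(rq);
 */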
/**
 * i915_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq;
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/*
	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	 */
	GEM_BUG_ON(ctx == i915->preempt_context);

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&i915->gpu_error))
		return ERR_PTR(-EIO);

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = intel_context_pin(ctx, engine);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	ret = reserve_gt(i915);
	if (ret)
		goto err_unpin;

	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
	if (ret)
		goto err_unreserve;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
	    i915_request_completed(rq))
		i915_request_retire(rq);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is the request we are writing to here, may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(i915->requests,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		/* Ratelimit ourselves to prevent oom from malicious clients */
		ret = i915_gem_wait_for_idle(i915,
					     I915_WAIT_LOCKED |
					     I915_WAIT_INTERRUPTIBLE);
		if (ret)
			goto err_unreserve;

		/*
		 * We've forced the client to stall and catch up with whatever
		 * backlog there might have been. As we are assuming that we
		 * caused the mempressure, now is an opportune time to
		 * recover as much memory from the request pool as is possible.
		 * Having already penalized the client to stall, we spend
		 * a little extra time to re-optimise page allocation.
		 */
		kmem_cache_shrink(i915->requests);
		rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */

		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	INIT_LIST_HEAD(&rq->active_list);
	rq->i915 = i915;
	rq->engine = engine;
	rq->gem_context = ctx;
	rq->hw_context = ce;
	rq->ring = ce->ring;
	rq->timeline = ce->ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence,
		       &i915_fence_ops,
		       &rq->lock,
		       rq->timeline->fence_context,
		       timeline_get_seqno(rq->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
	init_waitqueue_head(&rq->execute);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
	rq->signaling.wait.seqno = 0;
	rq->file_priv = NULL;
	rq->batch = NULL;
	rq->capture_list = NULL;
	rq->waitboost = false;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (ret)
		goto err_unwind;

	ret = engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	/* Keep a second pin for the dual retirement along engine and ring */
	__intel_context_pin(ce);

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->active_list));
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

	kmem_cache_free(i915->requests, rq);
err_unreserve:
	unreserve_gt(i915);
err_unpin:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

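/*
 * Order @to after @from: depending on the engines involved this is done
 * either with a submit-fence (same engine), a hardware semaphore (if
 * available and @from already has a global seqno), or by falling back to
 * waiting upon @from's dma-fence.
 */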
static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(to->i915,
						     &to->sched,
						     &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
		return ret < 0 ? ret : 0;
	}

	if (to->engine->semaphore.sync_to) {
		u32 seqno;

		GEM_BUG_ON(!from->engine->semaphore.signal);

		seqno = i915_request_global_seqno(from);
		if (!seqno)
			goto await_dma_fence;

		if (seqno <= to->timeline->global_sync[from->engine->id])
			return 0;

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		to->timeline->global_sync[from->engine->id] = seqno;
		return 0;
	}

await_dma_fence:
	ret = i915_sw_fence_await_dma_fence(&to->submit,
					    &from->fence, 0,
					    I915_FENCE_GFP);
	return ret < 0 ? ret : 0;
}

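/*
 * i915_request_await_dma_fence() makes @rq wait upon the completion of
 * @fence, first decomposing any fence-array into its component fences and
 * skipping those already signaled or belonging to @rq's own timeline.
 */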
int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context != rq->i915->mm.unordered_timeline &&
		    i915_timeline_sync_is_later(rq->timeline, fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context != rq->i915->mm.unordered_timeline)
			i915_timeline_sync_set(rq->timeline, fence);
	} while (--nchild);

	return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

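/*
 * i915_request_add() seals a request built by i915_request_alloc(): it
 * emits the final flush, reserves space for the breadcrumb, publishes the
 * request on its timeline and then hands it to the backend for (possibly
 * deferred) execution.
 */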
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void i915_request_add(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_timeline *timeline = request->timeline;
	struct intel_ring *ring = request->ring;
	struct i915_request *prev;
	u32 *cs;

	GEM_TRACE("%s fence %llx:%d\n",
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;
	engine->emit_flush(request, EMIT_FLUSH);

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	/*
	 * Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_sched_node_add_dependency(&request->sched,
							 &prev->sched,
							 &request->dep,
							 0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	if (list_is_first(&request->ring_link, &ring->request_list)) {
		GEM_TRACE("marking %s as active\n", ring->timeline->name);
		list_add(&ring->active_link, &request->i915->gt.active_rings);
	}
	request->emitted_jiffies = jiffies;

	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
		engine->schedule(request, &request->gem_context->sched);
	rcu_read_unlock();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
}

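/*
 * Helpers for the optimistic busywait in i915_request_wait(): we sample a
 * cheap per-cpu clock and abort the spin if we exceed the timeout or
 * migrate to another CPU (on which the clock comparison is meaningless).
 */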
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

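/*
 * Spin on the request's seqno for at most @timeout_us, bailing out early
 * if the request has not yet started executing, if an interrupt arrives,
 * if a signal is pending, if we migrate CPUs or if we need to reschedule.
 * Returns true only if the request completed while we were spinning.
 */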
static bool __i915_spin_request(const struct i915_request *rq,
				u32 seqno, int state, unsigned long timeout_us)
{
	struct intel_engine_cs *engine = rq->engine;
	unsigned int irq, cpu;

	GEM_BUG_ON(!seqno);

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the engine and so we can
	 * tell if the request has started. If the request hasn't started yet,
	 * it is a fair assumption that it will not complete within our
	 * relatively short timeout.
	 */
	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible.
	 * However, if it is a slow request, we want to sleep as quickly as
	 * possible. The tradeoff between waiting and sleeping is roughly the
	 * time it takes to sleep on a request, on the order of a microsecond.
	 */

	irq = atomic_read(&engine->irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			return seqno == i915_request_global_seqno(rq);

		/*
		 * Seqnos are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fixup coherency.
		 */
		if (atomic_read(&engine->irq_count) != irq)
			break;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
	struct i915_gpu_error *error = &request->i915->gpu_error;

	if (likely(!i915_reset_handoff(error)))
		return false;

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915, error->stalled_mask, error->reason);
	return true;
}

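/*
 * Example (a sketch): to block uninterruptibly and indefinitely on a
 * request from a path that does not hold struct_mutex:
 *
 *	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 */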
/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags; conversely, if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_request_completed(rq))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	add_wait_queue(&rq->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait, rq);

restart:
	do {
		set_current_state(state);
		if (intel_wait_update_request(&wait, rq))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))
			continue;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			goto complete;
		}

		if (!timeout) {
			timeout = -ETIME;
			goto complete;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(rq, wait.seqno, state, 5))
		goto complete;

	set_current_state(state);
	if (intel_engine_add_wait(rq->engine, &wait))
		/*
		 * In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(rq);

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, rq))
			break;

		set_current_state(state);

wakeup:
		/*
		 * Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(rq))
			break;

		/*
		 * If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))
			continue;

		/* Only spin if we know the GPU is processing this request */
		if (__i915_spin_request(rq, wait.seqno, state, 2))
			break;

		if (!intel_wait_check_request(&wait, rq)) {
			intel_engine_remove_wait(rq->engine, &wait);
			goto restart;
		}
	}

	intel_engine_remove_wait(rq->engine, &wait);
complete:
	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&rq->execute, &exec);
	trace_i915_request_wait_end(rq);

	return timeout;
}

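/*
 * Retire the completed requests on this ring in submission order, stopping
 * at the first request that is still busy.
 */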
static void ring_retire_requests(struct intel_ring *ring)
{
	struct i915_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &ring->request_list, ring_link) {
		if (!i915_request_completed(request))
			break;

		i915_request_retire(request);
	}
}

void i915_retire_requests(struct drm_i915_private *i915)
{
	struct intel_ring *ring, *tmp;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915->gt.active_requests)
		return;

	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
		ring_retire_requests(ring);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif