/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return intel_engine_enable_signaling(to_request(fence), true);
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);

	kmem_cache_free(rq->i915->requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};
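
/*
 * These ops back every fence we export, so a foreign waiter - e.g.
 * userspace poll()ing a sync_file wrapping one of our fences - ends up
 * in the callbacks above. A sketch of that path (illustrative, not a
 * verbatim call chain):
 *
 *	dma_fence_wait(fence, true)   ends up in i915_fence_wait()
 *	dma_fence_add_callback(...)   triggers i915_fence_enable_signaling()
 */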

static inline void
i915_request_remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
	struct intel_engine_cs *engine;
	struct i915_timeline *timeline;
	enum intel_engine_id id;
	int ret;

	/* Carefully retire all requests without writing to the rings */
	ret = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (ret)
		return ret;

	GEM_BUG_ON(i915->gt.active_requests);

	/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
	for_each_engine(engine, i915, id) {
		GEM_TRACE("%s seqno %d (current %d) -> %d\n",
			  engine->name,
			  engine->timeline.seqno,
			  intel_engine_get_seqno(engine),
			  seqno);

		if (seqno == engine->timeline.seqno)
			continue;

		kthread_park(engine->breadcrumbs.signaler);

		if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
			/* Flush any waiters before we reuse the seqno */
			intel_engine_disarm_breadcrumbs(engine);
			intel_engine_init_hangcheck(engine);
			GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
		}

		/* Check we are idle before we fiddle with hw state! */
		GEM_BUG_ON(!intel_engine_is_idle(engine));
		GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));

		/* Finally reset hw state */
		intel_engine_init_global_seqno(engine, seqno);
		engine->timeline.seqno = seqno;

		kthread_unpark(engine->breadcrumbs.signaler);
	}

	list_for_each_entry(timeline, &i915->gt.timelines, link)
		memset(timeline->global_sync, 0, sizeof(timeline->global_sync));

	i915->gt.request_serial = seqno;

	return 0;
}

int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
{
	struct drm_i915_private *i915 = to_i915(dev);

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (seqno == 0)
		return -EINVAL;

	/* HWS page needs to be set less than what we will inject to ring */
	return reset_all_global_seqno(i915, seqno - 1);
}

static int reserve_gt(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Reservation is fine until we may need to wrap around
	 *
	 * By incrementing the serial for every request, we know that no
	 * individual engine may exceed that serial (as each is reset to 0
	 * on any wrap). This protects even the most pessimistic of migrations
	 * of every request from all engines onto just one.
	 */
	while (unlikely(++i915->gt.request_serial == 0)) {
		ret = reset_all_global_seqno(i915, 0);
		if (ret) {
			i915->gt.request_serial--;
			return ret;
		}
	}

	if (!i915->gt.active_requests++)
		i915_gem_unpark(i915);

	return 0;
}
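
/*
 * For illustration: gt.request_serial is a u32, so after ~4 billion
 * requests the pre-increment above wraps to 0 and we take the slow path,
 * idling the GPU and resetting every engine timeline before handing out
 * serial 1 again. (A sketch of the arithmetic only, not an extra code path.)
 */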

static void unreserve_gt(struct drm_i915_private *i915)
{
	GEM_BUG_ON(!i915->gt.active_requests);
	if (!--i915->gt.active_requests)
		i915_gem_park(i915);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct i915_request *request)
{
	/* Space left intentionally blank */
}

static void advance_ring(struct i915_request *request)
{
	struct intel_ring *ring = request->ring;
	unsigned int tail;

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
	if (list_is_last(&request->ring_link, &ring->request_list)) {
		/*
		 * We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		 */
		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
		tail = READ_ONCE(request->tail);
		list_del(&ring->active_link);
	} else {
		tail = request->postfix;
	}
	list_del_init(&request->ring_link);

	ring->head = tail;
}

static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void __retire_engine_request(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
		  __func__, engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!i915_request_completed(rq));

	local_irq_disable();

	spin_lock(&engine->timeline.lock);
	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
	list_del_init(&rq->link);
	spin_unlock(&engine->timeline.lock);

	spin_lock(&rq->lock);
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		intel_engine_cancel_signaling(rq);
	if (rq->waitboost) {
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
	}
	spin_unlock(&rq->lock);

	local_irq_enable();

	/*
	 * The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context);
	engine->last_retired_context = rq->hw_context;
}

static void __retire_engine_upto(struct intel_engine_cs *engine,
				 struct i915_request *rq)
{
	struct i915_request *tmp;

	if (list_empty(&rq->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline.requests,
				       typeof(*tmp), link);

		GEM_BUG_ON(tmp->engine != engine);
		__retire_engine_request(engine, tmp);
	} while (tmp != rq);
}

static void i915_request_retire(struct i915_request *request)
{
	struct i915_gem_active *active, *next;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(request->engine));

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));

	trace_i915_request_retire(request);

	advance_ring(request);
	free_capture_list(request);

	/*
	 * Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/*
		 * In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->gem_context->ban_score);
	intel_context_unpin(request->hw_context);

	__retire_engine_upto(request->engine, request);

	unreserve_gt(request->i915);

	i915_sched_node_fini(request->i915, &request->sched);
	i915_request_put(request);
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_ring *ring = rq->ring;
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  intel_engine_get_seqno(rq->engine));

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_request_completed(rq));

	if (list_empty(&rq->ring_link))
		return;

	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);

		i915_request_retire(tmp);
	} while (tmp != rq);
}

static u32 timeline_get_seqno(struct i915_timeline *tl)
{
	return ++tl->seqno;
}

static void move_to_timeline(struct i915_request *request,
			     struct i915_timeline *timeline)
{
	GEM_BUG_ON(request->timeline == &request->engine->timeline);
	lockdep_assert_held(&request->engine->timeline.lock);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);
}

void __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	u32 seqno;

	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  engine->timeline.seqno + 1,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	GEM_BUG_ON(request->global_seqno);

	seqno = timeline_get_seqno(&engine->timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(intel_engine_signaled(engine, seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_enable_signaling(request, false);
	spin_unlock(&request->lock);

	engine->emit_breadcrumb(request,
				request->ring->vaddr + request->postfix);

	/* Transfer from per-context onto the global per-engine timeline */
	move_to_timeline(request, &engine->timeline);

	trace_i915_request_execute(request);

	wake_up_all(&request->execute);
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		intel_engine_cancel_signaling(request);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	move_to_timeline(request, request->timeline);

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a
	 * different global_seqno from the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);
		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged().  We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

/**
 * i915_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq;
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/*
	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	 */
	GEM_BUG_ON(ctx == i915->preempt_context);

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&i915->gpu_error))
		return ERR_PTR(-EIO);

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = intel_context_pin(ctx, engine);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	ret = reserve_gt(i915);
	if (ret)
		goto err_unpin;

	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
	if (ret)
		goto err_unreserve;

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
	    i915_request_completed(rq))
		i915_request_retire(rq);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be - and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(i915->requests,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		i915_retire_requests(i915);

		/* Ratelimit ourselves to prevent oom from malicious clients */
		rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
					 &i915->drm.struct_mutex);
		if (rq)
			cond_synchronize_rcu(rq->rcustate);

		rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	rq->rcustate = get_state_synchronize_rcu();

	INIT_LIST_HEAD(&rq->active_list);
	rq->i915 = i915;
	rq->engine = engine;
	rq->gem_context = ctx;
	rq->hw_context = ce;
	rq->ring = ce->ring;
	rq->timeline = ce->ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence,
		       &i915_fence_ops,
		       &rq->lock,
		       rq->timeline->fence_context,
		       timeline_get_seqno(rq->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
	init_waitqueue_head(&rq->execute);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
	rq->signaling.wait.seqno = 0;
	rq->file_priv = NULL;
	rq->batch = NULL;
	rq->capture_list = NULL;
	rq->waitboost = false;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 */
	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = engine->emit_flush(rq, EMIT_INVALIDATE);
	if (ret)
		goto err_unwind;

	ret = engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	/* Keep a second pin for the dual retirement along engine and ring */
	__intel_context_pin(ce);

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->active_list));
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

	kmem_cache_free(i915->requests, rq);
err_unreserve:
	unreserve_gt(i915);
err_unpin:
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}
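
/*
 * A minimal usage sketch (error handling abbreviated, the MI_NOOP payload
 * purely illustrative): allocate a request, emit commands into its ring,
 * then seal it with i915_request_add(). Note that once allocated, the
 * request must always be added, even on failure, to release its resources.
 *
 *	struct i915_request *rq;
 *	u32 *cs;
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (!IS_ERR(cs)) {
 *		*cs++ = MI_NOOP;
 *		*cs++ = MI_NOOP;
 *		intel_ring_advance(rq, cs);
 *	}
 *
 *	i915_request_add(rq);
 */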

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(to->i915,
						     &to->sched,
						     &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
		return ret < 0 ? ret : 0;
	}

	if (to->engine->semaphore.sync_to) {
		u32 seqno;

		GEM_BUG_ON(!from->engine->semaphore.signal);

		seqno = i915_request_global_seqno(from);
		if (!seqno)
			goto await_dma_fence;

		if (seqno <= to->timeline->global_sync[from->engine->id])
			return 0;

		trace_i915_gem_ring_sync_to(to, from);
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		to->timeline->global_sync[from->engine->id] = seqno;
		return 0;
	}

await_dma_fence:
	ret = i915_sw_fence_await_dma_fence(&to->submit,
					    &from->fence, 0,
					    I915_FENCE_GFP);
	return ret < 0 ? ret : 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context != rq->i915->mm.unordered_timeline &&
		    i915_timeline_sync_is_later(rq->timeline, fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context != rq->i915->mm.unordered_timeline)
			i915_timeline_sync_set(rq->timeline, fence);
	} while (--nchild);

	return 0;
}
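
/*
 * Illustrative only: a typical caller awaits a fence imported from
 * userspace (e.g. an execbuf "wait" fd) before the request may execute:
 *
 *	struct dma_fence *in = sync_file_get_fence(in_fd);
 *	if (!in)
 *		return -EINVAL;
 *	err = i915_request_await_dma_fence(rq, in);
 *	dma_fence_put(in);
 */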

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}
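
/*
 * For example (a sketch): before sampling @obj as a texture, a request
 * need only await the exclusive (write) fence, whereas before rendering
 * into @obj it must also await every shared (read) fence:
 *
 *	err = i915_request_await_object(rq, obj, false);  <- reader
 *	err = i915_request_await_object(rq, obj, true);   <- writer
 */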

void i915_request_skip(struct i915_request *rq, int error)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, rq->postfix - head);
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void i915_request_add(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_timeline *timeline = request->timeline;
	struct intel_ring *ring = request->ring;
	struct i915_request *prev;
	u32 *cs;

	GEM_TRACE("%s fence %llx:%d\n",
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	request->reserved_space = 0;
	engine->emit_flush(request, EMIT_FLUSH);

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	/*
	 * Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_sched_node_add_dependency(&request->sched,
							 &prev->sched,
							 &request->dep,
							 0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	if (list_is_first(&request->ring_link, &ring->request_list)) {
		GEM_TRACE("marking %s as active\n", ring->timeline->name);
		list_add(&ring->active_link, &request->i915->gt.active_rings);
	}
	request->emitted_jiffies = jiffies;

	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule) {
		struct i915_sched_attr attr = request->gem_context->sched;

		/*
		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients. (FQ_CODEL)
		 */
		if (!prev || i915_request_completed(prev))
			attr.priority |= I915_PRIORITY_NEWCLIENT;

		engine->schedule(request, &attr);
	}
	rcu_read_unlock();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt to the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}
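
/*
 * Worked example of the >>10 approximation above: local_clock() returning
 * 5,000,000ns yields 5000000 >> 10 = 4882 "microseconds" against a true
 * 5000us, i.e. we undershoot by ~2.3% (dividing by 1024 rather than 1000),
 * well within the tolerance of a heuristic busywait budget.
 */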

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static bool __i915_spin_request(const struct i915_request *rq,
				u32 seqno, int state, unsigned long timeout_us)
{
	struct intel_engine_cs *engine = rq->engine;
	unsigned int irq, cpu;

	GEM_BUG_ON(!seqno);

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the engine and so we can
	 * tell if the request has started. If the request hasn't started yet,
	 * it is a fair assumption that it will not complete within our
	 * relatively short timeout.
	 */
	if (!intel_engine_has_started(engine, seqno))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	irq = READ_ONCE(engine->breadcrumbs.irq_count);
	timeout_us += local_clock_us(&cpu);
	do {
		if (intel_engine_has_completed(engine, seqno))
			return seqno == i915_request_global_seqno(rq);

		/*
		 * Seqnos are meant to be ordered *before* the interrupt. If
		 * we see an interrupt without a corresponding seqno advance,
		 * assume we won't see one in the near future but require
		 * the engine->seqno_barrier() to fixup coherency.
		 */
		if (READ_ONCE(engine->breadcrumbs.irq_count) != irq)
			break;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

static bool __i915_wait_request_check_and_reset(struct i915_request *request)
{
	struct i915_gpu_error *error = &request->i915->gpu_error;

	if (likely(!i915_reset_handoff(error)))
		return false;

	__set_current_state(TASK_RUNNING);
	i915_reset(request->i915, error->stalled_mask, error->reason);
	return true;
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue;
	DEFINE_WAIT_FUNC(reset, default_wake_function);
	DEFINE_WAIT_FUNC(exec, default_wake_function);
	struct intel_wait wait;

	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&rq->i915->drm.struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	if (i915_request_completed(rq))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	add_wait_queue(&rq->execute, &exec);
	if (flags & I915_WAIT_LOCKED)
		add_wait_queue(errq, &reset);

	intel_wait_init(&wait);
	if (flags & I915_WAIT_PRIORITY)
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);

restart:
	do {
		set_current_state(state);
		if (intel_wait_update_request(&wait, rq))
			break;

		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))
			continue;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			goto complete;
		}

		if (!timeout) {
			timeout = -ETIME;
			goto complete;
		}

		timeout = io_schedule_timeout(timeout);
	} while (1);

	GEM_BUG_ON(!intel_wait_has_seqno(&wait));
	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(rq, wait.seqno, state, 5))
		goto complete;

	set_current_state(state);
	if (intel_engine_add_wait(rq->engine, &wait))
		/*
		 * In order to check that we haven't missed the interrupt
		 * as we enabled it, we need to kick ourselves to do a
		 * coherent check on the seqno before we sleep.
		 */
		goto wakeup;

	if (flags & I915_WAIT_LOCKED)
		__i915_wait_request_check_and_reset(rq);

	for (;;) {
		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);

		if (intel_wait_complete(&wait) &&
		    intel_wait_check_request(&wait, rq))
			break;

		set_current_state(state);

wakeup:
		/*
		 * Carefully check if the request is complete, giving time
		 * for the seqno to be visible following the interrupt.
		 * We also have to check in case we are kicked by the GPU
		 * reset in order to drop the struct_mutex.
		 */
		if (__i915_request_irq_complete(rq))
			break;

		/*
		 * If the GPU is hung, and we hold the lock, reset the GPU
		 * and then check for completion. On a full reset, the engine's
		 * HW seqno will be advanced past us and we are complete.
		 * If we do a partial reset, we have to wait for the GPU to
		 * resume and update the breadcrumb.
		 *
		 * If we don't hold the mutex, we can just wait for the worker
		 * to come along and update the breadcrumb (either directly
		 * itself, or indirectly by recovering the GPU).
		 */
		if (flags & I915_WAIT_LOCKED &&
		    __i915_wait_request_check_and_reset(rq))
			continue;

		/* Only spin if we know the GPU is processing this request */
		if (__i915_spin_request(rq, wait.seqno, state, 2))
			break;

		if (!intel_wait_check_request(&wait, rq)) {
			intel_engine_remove_wait(rq->engine, &wait);
			goto restart;
		}
	}

	intel_engine_remove_wait(rq->engine, &wait);
complete:
	__set_current_state(TASK_RUNNING);
	if (flags & I915_WAIT_LOCKED)
		remove_wait_queue(errq, &reset);
	remove_wait_queue(&rq->execute, &exec);
	trace_i915_request_wait_end(rq);

	return timeout;
}
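
/*
 * A sketch of a bounded, interruptible wait (assuming the caller does not
 * hold struct_mutex, hence no I915_WAIT_LOCKED):
 *
 *	long timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *					 msecs_to_jiffies(100));
 *	if (timeout < 0)
 *		return timeout;    <- -ETIME or -ERESTARTSYS
 */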

static void ring_retire_requests(struct intel_ring *ring)
{
	struct i915_request *request, *next;

	list_for_each_entry_safe(request, next,
				 &ring->request_list, ring_link) {
		if (!i915_request_completed(request))
			break;

		i915_request_retire(request);
	}
}

void i915_retire_requests(struct drm_i915_private *i915)
{
	struct intel_ring *ring, *tmp;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915->gt.active_requests)
		return;

	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
		ring_retire_requests(ring);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif