/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

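/*
 * Retire completed requests along a timeline, oldest first, stopping at
 * the first request that has yet to complete.
 */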
static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

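/*
 * Flush the submission tasklets on every engine so that completions are
 * processed before we attempt to retire the requests below.
 */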
static void flush_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_submission(engine);
}

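/*
 * intel_gt_retire_requests_timeout: retire completed requests on all
 * active timelines of the GT.
 *
 * With a positive @timeout (in jiffies) we also wait, interruptibly, on
 * the last request of each timeline before retiring it; a negative
 * @timeout requests an uninterruptible wait of the same magnitude.
 *
 * Returns 0 once everything has been retired, otherwise whatever is left
 * of @timeout (possibly a fence-wait error) while timelines remain busy.
 */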
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	unsigned long flags;
	bool interruptible;
	LIST_HEAD(free);

	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */

	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
			active_count++; /* report busy to caller, try again? */
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!tl->active_count);
		tl->active_count++; /* pin the list element */
		spin_unlock_irqrestore(&timelines->lock, flags);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		retire_requests(tl);

		spin_lock_irqsave(&timelines->lock, flags);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (--tl->active_count)
			active_count += !!rcu_access_pointer(tl->last_request.fence);
		else
			list_del(&tl->link);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(tl->active_count);
			list_add(&tl->link, &free);
		}
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return active_count ? timeout : 0;
}

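/*
 * intel_gt_wait_for_idle: drain the GT of outstanding requests by
 * retiring in a loop until no active timelines remain, the timeout is
 * exhausted, or a signal is pending.
 *
 * Returns 0 when idle (or the wait budget runs out), or a negative error
 * code such as -EINTR when interrupted.
 */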
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}

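/*
 * Background retirement worker: retire whatever has completed, then
 * re-arm itself to run again in about a second.
 */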
static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	intel_gt_retire_requests(gt);
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}

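/* Prepare the delayed worker used for background request retirement. */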
void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

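/* Stop the background retirement worker when parking the GT. */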
void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}

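/* Restart background retirement when the GT is unparked. */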
void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}