/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

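/*
 * Request retirement for the GT: each engine owns a worker that retires
 * timelines queued via intel_engine_add_retire(), and the GT runs a
 * background delayed worker that periodically retires completed requests
 * while it is unparked.
 */
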
#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

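/*
 * Walk the timeline's request list, oldest first, retiring each completed
 * request and stopping at the first that i915_request_retire() declines.
 */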
static void retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			break;
}

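/*
 * Kick each engine's submission tasklet so that pending completion events
 * are processed before we go on to sample the timelines.
 */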
static void flush_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_submission(engine);
}

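/*
 * Worker callback: claim the engine's entire retirement list with a single
 * xchg() and retire each timeline in turn. The list is a singly linked
 * chain threaded through tl->retire, with BIT(0) set by add_retire() to
 * mark a queued timeline, hence the ptr_mask_bits() when following it.
 */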
static void engine_retire(struct work_struct *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), retire_work);
	struct intel_timeline *tl = xchg(&engine->retire, NULL);

	do {
		struct intel_timeline *next = xchg(&tl->retire, NULL);

		/*
		 * Our goal here is to retire _idle_ timelines as soon as
		 * possible (as they are idle, we do not expect userspace
		 * to be cleaning up anytime soon).
		 *
		 * If the timeline is currently locked, either it is being
		 * retired elsewhere or about to be!
		 */
		if (mutex_trylock(&tl->mutex)) {
			retire_requests(tl);
			mutex_unlock(&tl->mutex);
		}
		intel_timeline_put(tl);

		GEM_BUG_ON(!next);
		tl = ptr_mask_bits(next, 1);
	} while (tl);
}

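/*
 * Queue @tl on @engine's retirement list. Returns true if @tl was added to
 * an empty list, in which case the caller must schedule the retirement
 * worker.
 */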
static bool add_retire(struct intel_engine_cs *engine,
		       struct intel_timeline *tl)
{
	struct intel_timeline *first;

	/*
	 * We open-code a llist here to include the additional tag [BIT(0)]
	 * so that we know when the timeline is already on a
	 * retirement queue: either this engine or another.
	 *
	 * However, we rely on a timeline only being active on a single
	 * engine at any one time, and on add_retire() being called before
	 * the engine releases the timeline and it is transferred to another
	 * engine to retire.
	 */

	if (READ_ONCE(tl->retire)) /* already queued */
		return false;

	intel_timeline_get(tl);
	first = READ_ONCE(engine->retire);
	do
		tl->retire = ptr_pack_bits(first, 1, 1);
	while (!try_cmpxchg(&engine->retire, &first, tl));

	return !first;
}

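/*
 * intel_engine_add_retire() hands a timeline over to @engine's retirement
 * worker. It takes no sleeping locks itself (retirement, and hence the
 * timeline mutex, is deferred to engine_retire() in process context), so
 * it can be called from the submission backend's interrupt/tasklet paths.
 * A hypothetical caller sketch, assuming @ce is an intel_context that has
 * just been switched off the hardware:
 *
 *	intel_engine_add_retire(engine, ce->timeline);
 */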
void intel_engine_add_retire(struct intel_engine_cs *engine,
			     struct intel_timeline *tl)
{
	if (add_retire(engine, tl))
		schedule_work(&engine->retire_work);
}

void intel_engine_init_retire(struct intel_engine_cs *engine)
{
	INIT_WORK(&engine->retire_work, engine_retire);
}

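/*
 * Flush the retirement worker before the engine is torn down; by the time
 * it completes, every queued timeline must have been consumed, so the
 * retirement list is expected to be empty.
 */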
void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
	flush_work(&engine->retire_work);
	GEM_BUG_ON(engine->retire);
}

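/*
 * Retire completed requests on all active timelines of @gt. A positive
 * @timeout is consumed waiting (interruptibly) on each timeline's last
 * request; a negative @timeout requests an uninterruptible wait of up to
 * -@timeout jiffies. Returns 0 if no busy timelines remain, otherwise the
 * remaining timeout (or the error returned by dma_fence_wait_timeout()).
 */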
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	unsigned long flags;
	bool interruptible;
	LIST_HEAD(free);

	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */

	spin_lock_irqsave(&timelines->lock, flags);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
			active_count++; /* report busy to caller, try again? */
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock_irqrestore(&timelines->lock, flags);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		retire_requests(tl);

		spin_lock_irqsave(&timelines->lock, flags);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);
		else
			active_count += !!rcu_access_pointer(tl->last_request.fence);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock_irqrestore(&timelines->lock, flags);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return active_count ? timeout : 0;
}

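/*
 * Retire and wait in a loop until either no requests remain outstanding or
 * the timeout is exhausted, returning -EINTR if a signal arrives between
 * iterations. A parked GT has no requests in flight and is reported idle
 * immediately. A caller might look like this (hypothetical sketch):
 *
 *	// quiesce the GPU, allowing a few seconds of grace
 *	err = intel_gt_wait_for_idle(gt, HZ * 5);
 *	if (err < 0)
 *		return err;
 */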
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}

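/*
 * Background retirement: while the GT is unparked, retire completed
 * requests roughly once a second, rearming the delayed work each time.
 * round_jiffies_up_relative(HZ) lets the wakeup coalesce with other
 * once-a-second timers to reduce spurious wakeups.
 */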
static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	intel_gt_retire_requests(gt);
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}

void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

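/*
 * The background retirement worker only needs to run while the GT is
 * awake: cancel the delayed work on park and restart it on unpark.
 */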
void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}

void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}