/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/jiffies.h>

#include "gt/intel_engine.h"

#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout)
{
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (dma_fence_is_i915(fence))
		return i915_request_wait(to_request(fence), flags, timeout);

	return dma_fence_wait_timeout(fence,
				      flags & I915_WAIT_INTERRUPTIBLE,
				      timeout);
}
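
/*
 * Illustrative sketch (hypothetical call site; "fence" and the one second
 * budget are assumptions, not code from this file): the helper consumes
 * and returns the remaining timeout in jiffies, going negative on error
 * (e.g. -ETIME or -ERESTARTSYS), so waits on several fences can be chained:
 *
 *	long timeout = msecs_to_jiffies(1000);
 *
 *	timeout = i915_gem_object_wait_fence(fence, I915_WAIT_INTERRUPTIBLE,
 *					     timeout);
 *	if (timeout < 0)
 *		return timeout;
 */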

static long
i915_gem_object_wait_reservation(struct dma_resv *resv,
				 unsigned int flags,
				 long timeout)
{
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = dma_resv_get_fences(resv, &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must also have been signaled. If all the shared fences
		 * are signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
	} else {
		excl = dma_resv_get_excl_unlocked(resv);
	}

	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout);

	dma_fence_put(excl);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled.
	 */
	if (prune_fences)
		dma_resv_prune(resv);

	return timeout;
}
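
/*
 * Illustrative semantics (hypothetical call sites): a read-only access
 * only needs the last writer to finish, while I915_WAIT_ALL also waits
 * for every outstanding reader:
 *
 *	// wait for the exclusive (write) fence only
 *	timeout = i915_gem_object_wait_reservation(resv, 0, timeout);
 *
 *	// wait for the writer and all shared (read) fences
 *	timeout = i915_gem_object_wait_reservation(resv, I915_WAIT_ALL,
 *						   timeout);
 */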

static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->sched_engine->schedule)
		engine->sched_engine->schedule(rq, attr);
	rcu_read_unlock();
}

static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
	return fence->ops == &dma_fence_chain_ops;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr)
{
	if (dma_fence_is_signaled(fence))
		return;

	local_bh_disable();

	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			fence_set_priority(array->fences[i], attr);
	} else if (__dma_fence_is_chain(fence)) {
		struct dma_fence *iter;

		/* The chain is ordered; if we boost the last, we boost all */
		dma_fence_chain_for_each(iter, fence) {
			fence_set_priority(to_dma_fence_chain(iter)->fence,
					   attr);
			break;
		}
		dma_fence_put(iter);
	} else {
		fence_set_priority(fence, attr);
	}

	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
					  &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			i915_gem_fence_wait_priority(shared[i], attr);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = dma_resv_get_excl_unlocked(obj->base.resv);
	}

	if (excl) {
		i915_gem_fence_wait_priority(excl, attr);
		dma_fence_put(excl);
	}
	return 0;
}
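
/*
 * Sketch of the intended usage pattern (the attr value is an assumption
 * here, shown for illustration): boost every fence the object depends on
 * so an imminent scanout is not stalled behind background work:
 *
 *	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
 *
 *	i915_gem_object_wait_priority(obj, 0, &attr);
 */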

/**
 * i915_gem_object_wait - wait for rendering to the object to complete
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout)
{
	might_sleep();
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->base.resv,
						   flags, timeout);
	return timeout < 0 ? timeout : 0;
}
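
/*
 * Hypothetical caller sketch (the two second budget is an assumption):
 * block until all outstanding rendering to @obj has completed, allowing
 * signals to interrupt the wait:
 *
 *	int err = i915_gem_object_wait(obj,
 *				       I915_WAIT_INTERRUPTIBLE |
 *				       I915_WAIT_ALL,
 *				       msecs_to_jiffies(2000));
 *	if (err)
 *		return err;	// -ETIME, -ERESTARTSYS, ...
 */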

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}
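
/*
 * Worked examples of the conversion (HZ == 1000 is assumed purely for
 * illustration):
 *
 *	to_wait_timeout(-1)      == MAX_SCHEDULE_TIMEOUT  // wait forever
 *	to_wait_timeout(0)       == 0                     // poll, cf. busy ioctl
 *	to_wait_timeout(1)       == 1                     // rounded up, never 0
 *	to_wait_timeout(1000000) == 2                     // 1ms -> 1 jiffy + 1
 */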

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time
 * in the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENOMEM: out of memory
 *  -ENODEV: internal IRQ failure
 *  -E?: the add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the
 * busy ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffy/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
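
/*
 * Userspace usage sketch (illustrative; assumes a libdrm-style drmIoctl()
 * wrapper and an already-open device fd, neither of which is defined in
 * this file):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,	// 1s; negative waits forever
 *	};
 *
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *	if (err && errno == ETIME)
 *		; // still busy; wait.timeout_ns holds the remaining time
 */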

/**
 * i915_gem_object_wait_migration - Sync an accelerated migration operation
 * @obj: The migrating object.
 * @flags: waiting flags. Currently supports only I915_WAIT_INTERRUPTIBLE.
 *
 * Wait for any pending async migration operation on the object, whether
 * it was initiated explicitly (i915_gem_object_migrate()) or implicitly
 * (swapin, initial clearing).
 *
 * Return: 0 if successful, -ERESTARTSYS if a signal was hit during waiting.
 */
int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags)
{
	might_sleep();
	/* NOP for now. */
	return 0;
}