/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/dma-fence-array.h>
#include <linux/jiffies.h>

#include "gt/intel_engine.h"

#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

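/*
 * Wait on a single fence, following the i915 timeout convention: an
 * already-signaled fence returns the remaining timeout untouched, a native
 * i915 request is waited upon via i915_request_wait(), and any foreign
 * fence falls back to dma_fence_wait_timeout().
 */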
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout)
{
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (dma_fence_is_i915(fence))
		return i915_request_wait(to_request(fence), flags, timeout);

	return dma_fence_wait_timeout(fence,
				      flags & I915_WAIT_INTERRUPTIBLE,
				      timeout);
}

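/*
 * Wait on the fences tracked by a reservation object. With I915_WAIT_ALL,
 * every shared (read) fence is waited upon in addition to the exclusive
 * (write) fence; otherwise only the exclusive fence is considered. Returns
 * the remaining timeout on success, or a negative error code.
 */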
static long
i915_gem_object_wait_reservation(struct dma_resv *resv,
				 unsigned int flags,
				 long timeout)
{
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = dma_resv_get_fences_rcu(resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must also be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
	} else {
		excl = dma_resv_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout);

	dma_fence_put(excl);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled.
	 */
	if (prune_fences)
		dma_resv_prune(resv);

	return timeout;
}

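/*
 * Bump the scheduling attributes of a single fence. Only unsignaled, native
 * i915 requests are affected: the engine's schedule() callback is invoked
 * under RCU with bottom halves disabled, so that any reprioritised tasklets
 * are kicked as soon as they are re-enabled.
 */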
static void __fence_set_priority(struct dma_fence *fence,
				 const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
		engine->schedule(rq, attr);
	rcu_read_unlock();
	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

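/*
 * As __fence_set_priority(), but unwraps a dma_fence_array so that each of
 * its constituent fences is reprioritised. We only recurse a single level;
 * nested arrays are left alone.
 */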
static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], attr);
	} else {
		__fence_set_priority(fence, attr);
	}
}

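/*
 * Raise the scheduling priority of every request currently tracked in the
 * object's reservation: all shared fences when I915_WAIT_ALL is given,
 * otherwise just the exclusive fence. This lets a waiter expedite the work
 * it is about to block upon.
 */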
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], attr);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(obj->base.resv);
	}

	if (excl) {
		fence_set_priority(excl, attr);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * i915_gem_object_wait - Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout)
{
	might_sleep();
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->base.resv,
						   flags, timeout);
	return timeout < 0 ? timeout : 0;
}

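/*
 * Illustrative usage sketch for i915_gem_object_wait() (an example, not a
 * caller in this file): an interruptible wait for all outstanding work on
 * an object, bounded to roughly two seconds. The result is 0 once the
 * object is idle, -ETIME if it is still busy when the timeout expires, or
 * -ERESTARTSYS if a signal interrupts the wait:
 *
 *	err = i915_gem_object_wait(obj,
 *				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
 *				   msecs_to_jiffies(2000));
 */
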
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

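/*
 * Convert the signed nanosecond timeout from userspace into the jiffies
 * convention used by the wait routines: a negative value means wait
 * indefinitely, zero means poll without blocking, and anything else is
 * rounded up to at least one jiffy.
 */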
static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait occurs without
 * holding struct_mutex, the object may become re-busied before this function
 * completes. A similar but shorter race condition exists in the busy ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffy/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
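
/*
 * Illustrative userspace sketch (an assumption, not part of this file),
 * using libdrm's drmIoctl() wrapper: wait for up to one second for a buffer
 * to become idle via DRM_IOCTL_I915_GEM_WAIT.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 1000000000,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 *
 * On success, ret is 0 and the object is idle. If the object is still busy,
 * ret is -1 with errno set to ETIME and the remaining time is written back
 * to wait.timeout_ns, as described in the kernel-doc above.
 */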