1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2016 Intel Corporation
5  */
6 
7 #include <linux/dma-fence-array.h>
8 #include <linux/dma-fence-chain.h>
9 #include <linux/jiffies.h>
10 
11 #include "gt/intel_engine.h"
12 
13 #include "i915_gem_ioctls.h"
14 #include "i915_gem_object.h"
15 
16 static long
17 i915_gem_object_wait_fence(struct dma_fence *fence,
18 			   unsigned int flags,
19 			   long timeout)
20 {
21 	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
22 
23 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
24 		return timeout;
25 
26 	if (dma_fence_is_i915(fence))
27 		return i915_request_wait_timeout(to_request(fence), flags, timeout);
28 
29 	return dma_fence_wait_timeout(fence,
30 				      flags & I915_WAIT_INTERRUPTIBLE,
31 				      timeout);
32 }
33 
34 static long
35 i915_gem_object_wait_reservation(struct dma_resv *resv,
36 				 unsigned int flags,
37 				 long timeout)
38 {
39 	struct dma_resv_iter cursor;
40 	struct dma_fence *fence;
41 	long ret = timeout ?: 1;
42 
43 	dma_resv_iter_begin(&cursor, resv, flags & I915_WAIT_ALL);
44 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
45 		ret = i915_gem_object_wait_fence(fence, flags, timeout);
46 		if (ret <= 0)
47 			break;
48 
49 		if (timeout)
50 			timeout = ret;
51 	}
52 	dma_resv_iter_end(&cursor);
53 
54 	return ret;
55 }
56 
/*
 * Raise the scheduling priority of the i915 request backing @fence, if any.
 *
 * Signaled fences and foreign (non-i915) fences are left untouched: there
 * is no pending i915 request behind them to reschedule.
 */
static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->sched_engine->schedule)
		engine->sched_engine->schedule(rq, attr);
	rcu_read_unlock();
}
74 
75 static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
76 {
77 	return fence->ops == &dma_fence_chain_ops;
78 }
79 
/*
 * Boost the priority of every i915 request that @fence depends upon, so a
 * waiter is serviced sooner. Composite fences (arrays and chains) are
 * handled by recursing a single level into their children.
 */
void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr)
{
	if (dma_fence_is_signaled(fence))
		return;

	/* Hold back the tasklets until all queues have been reprioritised */
	local_bh_disable();

	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			fence_set_priority(array->fences[i], attr);
	} else if (__dma_fence_is_chain(fence)) {
		struct dma_fence *iter;

		/* The chain is ordered; if we boost the last, we boost all */
		dma_fence_chain_for_each(iter, fence) {
			fence_set_priority(to_dma_fence_chain(iter)->fence,
					   attr);
			break;
		}
		/* Drop the reference held by dma_fence_chain_for_each() */
		dma_fence_put(iter);
	} else {
		fence_set_priority(fence, attr);
	}

	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
111 
112 int
113 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
114 			      unsigned int flags,
115 			      const struct i915_sched_attr *attr)
116 {
117 	struct dma_resv_iter cursor;
118 	struct dma_fence *fence;
119 
120 	dma_resv_iter_begin(&cursor, obj->base.resv, flags & I915_WAIT_ALL);
121 	dma_resv_for_each_fence_unlocked(&cursor, fence)
122 		i915_gem_fence_wait_priority(fence, attr);
123 	dma_resv_iter_end(&cursor);
124 	return 0;
125 }
126 
127 /**
128  * Waits for rendering to the object to be completed
129  * @obj: i915 gem object
130  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
131  * @timeout: how long to wait
132  */
133 int
134 i915_gem_object_wait(struct drm_i915_gem_object *obj,
135 		     unsigned int flags,
136 		     long timeout)
137 {
138 	might_sleep();
139 	GEM_BUG_ON(timeout < 0);
140 
141 	timeout = i915_gem_object_wait_reservation(obj->base.resv,
142 						   flags, timeout);
143 
144 	if (timeout < 0)
145 		return timeout;
146 
147 	return !timeout ? -ETIME : 0;
148 }
149 
150 static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
151 {
152 	/* nsecs_to_jiffies64() does not guard against overflow */
153 	if (NSEC_PER_SEC % HZ &&
154 	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
155 		return MAX_JIFFY_OFFSET;
156 
157 	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
158 }
159 
160 static unsigned long to_wait_timeout(s64 timeout_ns)
161 {
162 	if (timeout_ns < 0)
163 		return MAX_SCHEDULE_TIMEOUT;
164 
165 	if (timeout_ns == 0)
166 		return 0;
167 
168 	return nsecs_to_jiffies_timeout(timeout_ns);
169 }
170 
171 /**
172  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
173  * @dev: drm device pointer
174  * @data: ioctl data blob
175  * @file: drm file pointer
176  *
177  * Returns 0 if successful, else an error is returned with the remaining time in
178  * the timeout parameter.
179  *  -ETIME: object is still busy after timeout
180  *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
182  * Also possible, but rare:
183  *  -EAGAIN: incomplete, restart syscall
184  *  -ENOMEM: damn
185  *  -ENODEV: Internal IRQ fail
186  *  -E?: The add request failed
187  *
188  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
189  * non-zero timeout parameter the wait ioctl will wait for the given number of
190  * nanoseconds on an object becoming unbusy. Since the wait itself does so
191  * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
194  */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	/* No flags are currently defined for this ioctl */
	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns));

	if (args->timeout_ns > 0) {
		/* Report the unconsumed wait budget back to userspace */
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffie/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
241 
242 /**
243  * i915_gem_object_wait_migration - Sync an accelerated migration operation
244  * @obj: The migrating object.
245  * @flags: waiting flags. Currently supports only I915_WAIT_INTERRUPTIBLE.
246  *
247  * Wait for any pending async migration operation on the object,
248  * whether it's explicitly (i915_gem_object_migrate()) or implicitly
249  * (swapin, initial clearing) initiated.
250  *
251  * Return: 0 if successful, -ERESTARTSYS if a signal was hit during waiting.
252  */
253 int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
254 				   unsigned int flags)
255 {
256 	might_sleep();
257 
258 	return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE));
259 }
260