/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

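/* Deferred work item run from the system workqueue once its fence signals. */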
struct fence_work {
	struct work_struct base;
	struct list_head head;
	void (*func)(void *);
	void *data;
};

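/*
 * Dispatch any queued work items and mark the fence as signalled by
 * clearing its channel pointer and unlinking it from the pending list.
 * Caller must hold fctx->lock.
 */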
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
	struct fence_work *work, *temp;

	list_for_each_entry_safe(work, temp, &fence->work, head) {
		schedule_work(&work->base);
		list_del(&work->head);
	}

	fence->channel = NULL;
	list_del(&fence->head);
}

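/* Tear down a channel's fence context, signalling anything still pending. */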
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence, *fnext;

	spin_lock(&fctx->lock);
	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
		nouveau_fence_signal(fence);
	}
	spin_unlock(&fctx->lock);
}

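/* Initialise the per-channel fence context (flip/pending lists and lock). */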
void
nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
{
	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
}

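/* Workqueue handler: run the deferred callback, then free the work item. */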
static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct fence_work *work = container_of(kwork, typeof(*work), base);
	work->func(work->data);
	kfree(work);
}

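/*
 * Schedule func(data) to run once the fence signals.  If the fence has
 * already signalled, or the work item cannot be queued, the callback is
 * invoked directly instead.
 */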
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_channel *chan = fence->channel;
	struct nouveau_fence_chan *fctx;
	struct fence_work *work = NULL;

	if (nouveau_fence_done(fence)) {
		func(data);
		return;
	}

	fctx = chan->fence;
	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/* Allocation failed: fall back to waiting synchronously. */
		WARN_ON(nouveau_fence_wait(fence, false, false));
		func(data);
		return;
	}

	spin_lock(&fctx->lock);
	if (!fence->channel) {
		/* Fence signalled while we were allocating. */
		spin_unlock(&fctx->lock);
		kfree(work);
		func(data);
		return;
	}

	INIT_WORK(&work->base, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;
	list_add(&work->head, &fence->work);
	spin_unlock(&fctx->lock);
}

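/* Signal and drop every pending fence the hardware has already passed. */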
static void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence *fence, *fnext;

	spin_lock(&fctx->lock);
	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
		if (fctx->read(chan) < fence->sequence)
			break;

		nouveau_fence_signal(fence);
		nouveau_fence_unref(&fence);
	}
	spin_unlock(&fctx->lock);
}

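/*
 * Emit the fence onto its channel with a 15 second timeout and queue it
 * on the context's pending list.
 */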
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	int ret;

	fence->channel  = chan;
	fence->timeout  = jiffies + (15 * HZ);
	fence->sequence = ++fctx->sequence;

	ret = fctx->emit(fence);
	if (!ret) {
		/* The pending list holds its own reference until the fence signals. */
		kref_get(&fence->kref);
		spin_lock(&fctx->lock);
		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock(&fctx->lock);
	}

	return ret;
}

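/* Poll for completion; a signalled fence has no channel attached. */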
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->channel)
		nouveau_fence_update(fence->channel);
	return !fence->channel;
}

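/* Pairs a uevent notifier with the wait queue it should wake. */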
struct nouveau_fence_wait {
	struct nouveau_fence_priv *priv;
	struct nvif_notify notify;
};

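/* Notifier callback: wake everyone sleeping in nouveau_fence_wait_uevent(). */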
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_wait *wait =
		container_of(notify, typeof(*wait), notify);
	wake_up_all(&wait->priv->waiting);
	return NVIF_NOTIFY_KEEP;
}

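/*
 * Block on the channel's uevent notifier until the fence signals, its
 * timeout expires, or (if intr) a signal is received.
 */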
static int
nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
{
	struct nouveau_channel *chan = fence->channel;
	struct nouveau_fence_priv *priv = chan->drm->fence;
	struct nouveau_fence_wait wait = { .priv = priv };
	int ret = 0;

	ret = nvif_notify_init(chan->object, NULL,
			       nouveau_fence_wait_uevent_handler, false,
			       G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			       &(struct nvif_notify_uevent_req) {
			       },
			       sizeof(struct nvif_notify_uevent_req),
			       sizeof(struct nvif_notify_uevent_rep),
			       &wait.notify);
	if (ret)
		return ret;

	nvif_notify_get(&wait.notify);

	if (fence->timeout) {
		unsigned long timeout = fence->timeout - jiffies;

		if (time_before(jiffies, fence->timeout)) {
			if (intr) {
				ret = wait_event_interruptible_timeout(
						priv->waiting,
						nouveau_fence_done(fence),
						timeout);
			} else {
				ret = wait_event_timeout(priv->waiting,
						nouveau_fence_done(fence),
						timeout);
			}
		}

		if (ret >= 0) {
			/* Carry the unused part of the timeout forward. */
			fence->timeout = jiffies + ret;
			if (time_after_eq(jiffies, fence->timeout))
				ret = -EBUSY;
		}
	} else {
		if (intr) {
			ret = wait_event_interruptible(priv->waiting,
					nouveau_fence_done(fence));
		} else {
			wait_event(priv->waiting, nouveau_fence_done(fence));
		}
	}

	nvif_notify_fini(&wait.notify);
	if (unlikely(ret < 0))
		return ret;

	return 0;
}

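/*
 * Wait for a fence to signal.  With lazy set, prefer event-driven waits
 * (uevents) or an exponentially backed-off hrtimer sleep over busy
 * polling; intr allows the wait to be interrupted by signals.
 */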
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	struct nouveau_channel *chan = fence->channel;
	struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
		ret = nouveau_fence_wait_uevent(fence, intr);
		if (ret < 0)
			return ret;
	}

	while (!nouveau_fence_done(fence)) {
		if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);
		if (lazy) {
			/* Sleep with exponential backoff, capped at 1ms. */
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

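/*
 * Make chan wait for a fence emitted on another channel before running
 * subsequent commands, using the backend's hardware sync if possible.
 */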
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_channel *prev;
	int ret = 0;

	prev = fence ? fence->channel : NULL;
	if (prev) {
		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
			ret = fctx->sync(fence, prev, chan);
			/* Fall back to a CPU wait if the hardware sync fails. */
			if (unlikely(ret))
				ret = nouveau_fence_wait(fence, true, false);
		}
	}

	return ret;
}

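/* kref release callback: the last reference is gone, free the fence. */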
static void
nouveau_fence_del(struct kref *kref)
{
	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
	kfree(fence);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		kref_put(&(*pfence)->kref, nouveau_fence_del);
	*pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
	if (fence)
		kref_get(&fence->kref);
	return fence;
}

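/* Allocate a fence, emit it on chan, and return it with one reference held. */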
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&fence->work);
	fence->sysmem = sysmem;
	kref_init(&fence->kref);

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}