/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

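/*
 * Must be called with fctx->lock held.  Signals the fence, unlinks it
 * from the channel's pending list and drops the reference that list
 * held.  If signaling had been enabled, the uevent notifier reference
 * taken in nouveau_fence_enable_signaling() is dropped as well.
 */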
static void
nouveau_fence_signal(struct nouveau_fence *fence)
{
	fence_signal_locked(&fence->base);
	list_del(&fence->head);

	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			nvif_notify_put(&fctx->notify);
	}

	fence_put(&fence->base);
}

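/*
 * Return the nouveau_fence backing @fence if it was emitted by this
 * device (both the ops and the fence context range match), or NULL if
 * it came from elsewhere and has to be waited on generically.
 */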
static struct nouveau_fence *
nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm)
{
	struct nouveau_fence_priv *priv = (void *)drm->fence;

	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	if (fence->context < priv->context_base ||
	    fence->context >= priv->context_base + priv->contexts)
		return NULL;

	return from_fence(fence);
}

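/*
 * Tear down a channel's fence context: disable the uevent notifier and
 * force-signal everything still pending, since no more interrupts will
 * arrive for this channel.
 */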
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;

	nvif_notify_fini(&fctx->notify);

	spin_lock_irq(&fctx->lock);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		/* Clear the channel pointer before signaling: the signal
		 * drops the pending list's reference, which may be the
		 * last one, so the fence must not be touched afterwards.
		 */
		fence->channel = NULL;
		nouveau_fence_signal(fence);
	}
	spin_unlock_irq(&fctx->lock);
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

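/*
 * Must be called with fctx->lock held.  Reads the channel's current
 * seqno and signals every pending fence it has passed.  The signed
 * difference keeps the comparison correct across 32-bit seqno
 * wrap-around.
 */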
static void
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			return;

		nouveau_fence_signal(fence);
	}
}

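/*
 * Non-stall interrupt handler: a fence may have completed, so walk the
 * pending list and signal whatever is done.
 */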
static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
	struct nouveau_fence_chan *fctx =
		container_of(notify, typeof(*fctx), notify);
	unsigned long flags;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		nouveau_fence_update(fence->channel, fctx);
	}
	spin_unlock_irqrestore(&fctx->lock, flags);

	/* Always return KEEP here; the notify reference count is dropped
	 * in nouveau_fence_signal(), reached via nouveau_fence_update().
	 */
	return NVIF_NOTIFY_KEEP;
}

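/*
 * Initialize the per-channel fence state: pending/flip lists, lock,
 * fence context id and a human-readable timeline name.  If the
 * backend supports uevents, also set up the non-stall interrupt
 * notifier (it stays disabled until someone enables signaling).
 */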
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)nvif_client(chan->object);
	int ret;

	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = priv->context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvkm_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	ret = nvif_notify_init(chan->object, NULL,
			 nouveau_fence_wait_uevent_handler, false,
			 G82_CHANNEL_DMA_V0_NTFY_UEVENT,
			 &(struct nvif_notify_uevent_req) { },
			 sizeof(struct nvif_notify_uevent_req),
			 sizeof(struct nvif_notify_uevent_rep),
			 &fctx->notify);

	WARN_ON(ret);
}

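/*
 * Carrier for a deferred callback: runs func(data) from the system
 * workqueue once the fence it was attached to signals.
 */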
struct nouveau_fence_work {
	struct work_struct work;
	struct fence_cb cb;
	void (*func)(void *);
	void *data;
};

static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);

	work->func(work->data);
	kfree(work);
}

static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
{
	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);

	schedule_work(&work->work);
}

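/*
 * Schedule func(data) to run after the fence signals.  If the fence is
 * already signaled, or no memory is available for the work item, fall
 * back to calling it synchronously (waiting first if needed).
 */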
void
nouveau_fence_work(struct fence *fence,
		   void (*func)(void *), void *data)
{
	struct nouveau_fence_work *work;

	if (fence_is_signaled(fence))
		goto err;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (!work) {
		/*
		 * this might not be a nouveau fence any more,
		 * so force a lazy wait here
		 */
		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
					   true, false));
		goto err;
	}

	INIT_WORK(&work->work, nouveau_fence_work_handler);
	work->func = func;
	work->data = data;

	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
		goto err_free;
	return;

err_free:
	kfree(work);
err:
	func(data);
}

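/*
 * Initialize the base fence with the next seqno and push it to the
 * hardware.  On success the fence goes onto the channel's pending
 * list, which holds its own reference until it is signaled.
 */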
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->channel  = chan;
	fence->timeout  = jiffies + (15 * HZ);

	fence_init(&fence->base, priv->uevent ?
		   &nouveau_fence_ops_uevent : &nouveau_fence_ops_legacy,
		   &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	trace_fence_emit(&fence->base);
	ret = fctx->emit(fence);
	if (!ret) {
		fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);
		nouveau_fence_update(chan, fctx);
		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}

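/*
 * Poll for completion.  For our own fences this also kicks
 * nouveau_fence_update() so anything that finished without an
 * interrupt gets signaled now.
 */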
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		unsigned long flags;

		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		nouveau_fence_update(fence->channel, fctx);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return fence_is_signaled(&fence->base);
}

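/*
 * Wait without interrupts: poll with an hrtimer sleep that backs off
 * exponentially from 1us up to a 1ms cap.  Returns the remaining
 * jiffies on success, 0 on timeout.
 */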
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_USEC;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = ktime_set(0, sleep_time);
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

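/*
 * Busy-wait until the fence signals, the 15s emit timeout expires
 * (-EBUSY) or, if interruptible, a signal arrives (-ERESTARTSYS).
 */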
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

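/*
 * Wait for a fence with a 15 second timeout.  @lazy selects a sleeping
 * wait through the fence API; otherwise we busy-wait.  Returns 0 on
 * success, -EBUSY on timeout or -ERESTARTSYS when interrupted.
 */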
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

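/*
 * Make @chan synchronize with the fences on @nvbo's reservation object
 * before using the buffer.  For a read (@exclusive false) only the
 * exclusive fence matters; for a write all shared fences are synced
 * too (when any exist, they supersede the exclusive fence).  Fences
 * from one of our own channels are synced on the GPU via fctx->sync();
 * anything foreign is waited on with fence_wait().
 */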
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct fence *fence;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct nouveau_fence *f;
	int ret = 0, i;

	if (!exclusive) {
		ret = reservation_object_reserve_shared(resv);
		if (ret)
			return ret;
	}

	fobj = reservation_object_get_list(resv);
	fence = reservation_object_get_excl(resv);

	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
		struct nouveau_channel *prev = NULL;

		f = nouveau_local_fence(fence, chan->drm);
		if (f)
			prev = f->channel;

		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
			ret = fence_wait(fence, intr);

		return ret;
	}

	if (!exclusive || !fobj)
		return ret;

	for (i = 0; i < fobj->shared_count && !ret; ++i) {
		struct nouveau_channel *prev = NULL;

		fence = rcu_dereference_protected(fobj->shared[i],
						reservation_object_held(resv));

		f = nouveau_local_fence(fence, chan->drm);
		if (f)
			prev = f->channel;

		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
			ret = fence_wait(fence, intr);
	}

	return ret;
}

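/* Drop a fence reference and clear the caller's pointer. */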
void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		fence_put(&(*pfence)->base);
	*pfence = NULL;
}

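/*
 * Allocate a fence and emit it on @chan.  @sysmem selects the
 * system-memory sequence buffer on backends that have one.  On error
 * the fence is freed and *pfence is set to NULL.
 */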
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
		  struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->sysmem = sysmem;

	ret = nouveau_fence_emit(fence, chan);
	if (ret)
		nouveau_fence_unref(&fence);

	*pfence = fence;
	return ret;
}

static const char *nouveau_fence_get_driver_name(struct fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return fence->channel ? fctx->name : "dead channel";
}

/*
 * Ideally ->read() would not assume the channel context is still
 * alive.  This function may be called from another device after the
 * channel has gone away, in which case chan points at freed memory.
 * The DRM node should still exist, so the context index could instead
 * be derived from the fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan = fence->channel;

	return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
}

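/*
 * .enable_signaling for the legacy (no uevent) path: there is no
 * interrupt to hook up, so just check whether the fence already
 * completed and, if so, retire it immediately.
 */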
static bool nouveau_fence_no_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);
		fence_put(&fence->base);
		return false;
	}

	return true;
}

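/*
 * Drop the reference on the channel's fence context that was taken at
 * emit time, then free the fence itself.
 */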
static void nouveau_fence_release(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	fence_free(&fence->base);
}

static const struct fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

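/*
 * .enable_signaling for the uevent path: take a reference on the
 * non-stall interrupt notifier (enabling it on the 0 -> 1 transition)
 * and mark the fence so nouveau_fence_signal() knows to drop that
 * reference again.
 */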
static bool nouveau_fence_enable_signaling(struct fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_notify_get(&fctx->notify);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_notify_put(&fctx->notify);

	return ret;
}

static const struct fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = fence_default_wait,
	/*
	 * nouveau_fence_emit() takes a fence context reference for both
	 * fence types, so the uevent ops must release it too; leaving
	 * .release NULL would leak the context.
	 */
	.release = nouveau_fence_release
};