xref: /openbmc/linux/drivers/gpu/drm/drm_syncobj.c (revision d4295e12)
1 /*
2  * Copyright 2017 Red Hat
3  * Parts ported from amdgpu (fence wait code).
4  * Copyright 2016 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
23  * IN THE SOFTWARE.
24  *
25  * Authors:
26  *
27  */
28 
29 /**
30  * DOC: Overview
31  *
32  * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
33  * persistent objects that contain an optional fence. The fence can be updated
34  * with a new fence, or be NULL.
35  *
36  * syncobj's can be waited upon, where it will wait for the underlying
37  * fence.
38  *
39  * syncobj's can be export to fd's and back, these fd's are opaque and
40  * have no other use case, except passing the syncobj between processes.
41  *
42  * Their primary use-case is to implement Vulkan fences and semaphores.
43  *
44  * syncobj have a kref reference count, but also have an optional file.
45  * The file is only created once the syncobj is exported.
46  * The file takes a reference on the kref.
47  */
48 
49 #include <drm/drmP.h>
50 #include <linux/file.h>
51 #include <linux/fs.h>
52 #include <linux/anon_inodes.h>
53 #include <linux/sync_file.h>
54 #include <linux/sched/signal.h>
55 
56 #include "drm_internal.h"
57 #include <drm/drm_syncobj.h>
58 
59 /* merge normal syncobj to timeline syncobj, the point interval is 1 */
60 #define DRM_SYNCOBJ_BINARY_POINT 1
61 
/* Minimal standalone dma_fence used to represent "already signaled";
 * carries its own spinlock for dma_fence_init().
 */
struct drm_syncobj_stub_fence {
	struct dma_fence base;
	spinlock_t lock;
};
66 
/* One fixed name serves as both driver name and timeline name. */
static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
{
	return "syncobjstub";
}
71 
/* Stub fences are pre-signaled, so only the name callbacks are needed. */
static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
	.get_driver_name = drm_syncobj_stub_fence_get_name,
	.get_timeline_name = drm_syncobj_stub_fence_get_name,
};
76 
/* One entry in a syncobj's signal_pt_list: the fence(s) backing a
 * single timeline point.
 */
struct drm_syncobj_signal_pt {
	struct dma_fence_array *fence_array;	/* new fence (+ prior tail for timelines) */
	u64    value;				/* timeline point this entry signals */
	struct list_head list;			/* link in syncobj->signal_pt_list */
};
82 
/* Lazily-initialized, globally shared "already signaled" fence; the
 * lock guards its one-time init in drm_syncobj_get_stub_fence().
 */
static DEFINE_SPINLOCK(signaled_fence_lock);
static struct dma_fence signaled_fence;
85 
/* Return a reference to the shared pre-signaled stub fence,
 * initializing it on first use.  Used for timeline points at or below
 * the already garbage-collected part of a timeline.
 */
static struct dma_fence *drm_syncobj_get_stub_fence(void)
{
	spin_lock(&signaled_fence_lock);
	/* .ops is only set once the fence has been initialized */
	if (!signaled_fence.ops) {
		dma_fence_init(&signaled_fence,
			       &drm_syncobj_stub_fence_ops,
			       &signaled_fence_lock,
			       0, 0);
		dma_fence_signal_locked(&signaled_fence);
	}
	spin_unlock(&signaled_fence_lock);

	return dma_fence_get(&signaled_fence);
}
100 /**
101  * drm_syncobj_find - lookup and reference a sync object.
102  * @file_private: drm file private pointer
103  * @handle: sync object handle to lookup.
104  *
105  * Returns a reference to the syncobj pointed to by handle or NULL. The
106  * reference must be released by calling drm_syncobj_put().
107  */
108 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
109 				     u32 handle)
110 {
111 	struct drm_syncobj *syncobj;
112 
113 	spin_lock(&file_private->syncobj_table_lock);
114 
115 	/* Check if we currently have a reference on the object */
116 	syncobj = idr_find(&file_private->syncobj_idr, handle);
117 	if (syncobj)
118 		drm_syncobj_get(syncobj);
119 
120 	spin_unlock(&file_private->syncobj_table_lock);
121 
122 	return syncobj;
123 }
124 EXPORT_SYMBOL(drm_syncobj_find);
125 
/* Look up the fence backing @point; caller must hold pt_lock (or
 * otherwise keep signal_pt_list stable).  Timeline points at or below
 * the collected part resolve to the shared stub fence; returns NULL
 * when no matching point exists yet.  The returned fence is
 * referenced.
 */
static struct dma_fence *
drm_syncobj_find_signal_pt_for_point(struct drm_syncobj *syncobj,
				     uint64_t point)
{
	struct drm_syncobj_signal_pt *signal_pt;

	if ((syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) &&
	    (point <= syncobj->timeline))
		return drm_syncobj_get_stub_fence();

	/* the list is ordered by value, smallest first */
	list_for_each_entry(signal_pt, &syncobj->signal_pt_list, list) {
		if (point > signal_pt->value)
			continue;
		/* binary syncobjs require an exact point match */
		if ((syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) &&
		    (point != signal_pt->value))
			continue;
		return dma_fence_get(&signal_pt->fence_array->base);
	}
	return NULL;
}
146 
147 static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
148 					    struct drm_syncobj_cb *cb,
149 					    drm_syncobj_func_t func)
150 {
151 	cb->func = func;
152 	list_add_tail(&cb->node, &syncobj->cb_list);
153 }
154 
/* Fetch the fence a waiter should wait on, or, if it doesn't exist
 * yet, register @cb to be notified when one arrives.  cb_mutex is held
 * across both steps so a concurrent drm_syncobj_replace_fence() cannot
 * slip in between the lookup and the callback registration.
 */
static void drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
						  struct dma_fence **fence,
						  struct drm_syncobj_cb *cb,
						  drm_syncobj_func_t func)
{
	u64 pt_value = 0;

	WARN_ON(*fence);

	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
		/*BINARY syncobj always wait on last pt */
		pt_value = syncobj->signal_point;

		/* nothing submitted yet: wait for the first point */
		if (pt_value == 0)
			pt_value += DRM_SYNCOBJ_BINARY_POINT;
	}

	mutex_lock(&syncobj->cb_mutex);
	spin_lock(&syncobj->pt_lock);
	*fence = drm_syncobj_find_signal_pt_for_point(syncobj, pt_value);
	spin_unlock(&syncobj->pt_lock);
	if (!*fence)
		drm_syncobj_add_callback_locked(syncobj, cb, func);
	mutex_unlock(&syncobj->cb_mutex);
}
180 
181 static void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
182 					struct drm_syncobj_cb *cb)
183 {
184 	mutex_lock(&syncobj->cb_mutex);
185 	list_del_init(&cb->node);
186 	mutex_unlock(&syncobj->cb_mutex);
187 }
188 
/* (Re)set a syncobj to its empty state: fresh fence context, no
 * signal points.  Also used by the RESET ioctl, hence the locking.
 */
static void drm_syncobj_init(struct drm_syncobj *syncobj)
{
	spin_lock(&syncobj->pt_lock);
	syncobj->timeline_context = dma_fence_context_alloc(1);
	syncobj->timeline = 0;
	syncobj->signal_point = 0;
	init_waitqueue_head(&syncobj->wq);

	INIT_LIST_HEAD(&syncobj->signal_pt_list);
	spin_unlock(&syncobj->pt_lock);
}
200 
/* Drop every signal point (and its fence-array reference) from the
 * syncobj, leaving the list empty.
 */
static void drm_syncobj_fini(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_signal_pt *signal_pt = NULL, *tmp;

	spin_lock(&syncobj->pt_lock);
	list_for_each_entry_safe(signal_pt, tmp,
				 &syncobj->signal_pt_list, list) {
		list_del(&signal_pt->list);
		dma_fence_put(&signal_pt->fence_array->base);
		kfree(signal_pt);
	}
	spin_unlock(&syncobj->pt_lock);
}
214 
215 static int drm_syncobj_create_signal_pt(struct drm_syncobj *syncobj,
216 					struct dma_fence *fence,
217 					u64 point)
218 {
219 	struct drm_syncobj_signal_pt *signal_pt =
220 		kzalloc(sizeof(struct drm_syncobj_signal_pt), GFP_KERNEL);
221 	struct drm_syncobj_signal_pt *tail_pt;
222 	struct dma_fence **fences;
223 	int num_fences = 0;
224 	int ret = 0, i;
225 
226 	if (!signal_pt)
227 		return -ENOMEM;
228 	if (!fence)
229 		goto out;
230 
231 	fences = kmalloc_array(sizeof(void *), 2, GFP_KERNEL);
232 	if (!fences) {
233 		ret = -ENOMEM;
234 		goto out;
235 	}
236 	fences[num_fences++] = dma_fence_get(fence);
237 	/* timeline syncobj must take this dependency */
238 	if (syncobj->type == DRM_SYNCOBJ_TYPE_TIMELINE) {
239 		spin_lock(&syncobj->pt_lock);
240 		if (!list_empty(&syncobj->signal_pt_list)) {
241 			tail_pt = list_last_entry(&syncobj->signal_pt_list,
242 						  struct drm_syncobj_signal_pt, list);
243 			fences[num_fences++] =
244 				dma_fence_get(&tail_pt->fence_array->base);
245 		}
246 		spin_unlock(&syncobj->pt_lock);
247 	}
248 	signal_pt->fence_array = dma_fence_array_create(num_fences, fences,
249 							syncobj->timeline_context,
250 							point, false);
251 	if (!signal_pt->fence_array) {
252 		ret = -ENOMEM;
253 		goto fail;
254 	}
255 
256 	spin_lock(&syncobj->pt_lock);
257 	if (syncobj->signal_point >= point) {
258 		DRM_WARN("A later signal is ready!");
259 		spin_unlock(&syncobj->pt_lock);
260 		goto exist;
261 	}
262 	signal_pt->value = point;
263 	list_add_tail(&signal_pt->list, &syncobj->signal_pt_list);
264 	syncobj->signal_point = point;
265 	spin_unlock(&syncobj->pt_lock);
266 	wake_up_all(&syncobj->wq);
267 
268 	return 0;
269 exist:
270 	dma_fence_put(&signal_pt->fence_array->base);
271 fail:
272 	for (i = 0; i < num_fences; i++)
273 		dma_fence_put(fences[i]);
274 	kfree(fences);
275 out:
276 	kfree(signal_pt);
277 	return ret;
278 }
279 
/* Advance syncobj->timeline past already-signaled points and free
 * them.  For binary syncobjs the newest point is always kept so the
 * current fence stays observable.
 */
static void drm_syncobj_garbage_collection(struct drm_syncobj *syncobj)
{
	struct drm_syncobj_signal_pt *signal_pt, *tmp, *tail_pt;

	spin_lock(&syncobj->pt_lock);
	/* NOTE(review): on an empty list this yields an out-of-bounds
	 * pointer, but it is then only compared against iterators (the
	 * loop body never runs), so it is never dereferenced.
	 */
	tail_pt = list_last_entry(&syncobj->signal_pt_list,
				  struct drm_syncobj_signal_pt,
				  list);
	list_for_each_entry_safe(signal_pt, tmp,
				 &syncobj->signal_pt_list, list) {
		/* never collect the most recent binary point */
		if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY &&
		    signal_pt == tail_pt)
			continue;
		if (dma_fence_is_signaled(&signal_pt->fence_array->base)) {
			syncobj->timeline = signal_pt->value;
			list_del(&signal_pt->list);
			dma_fence_put(&signal_pt->fence_array->base);
			kfree(signal_pt);
		} else {
			/*signal_pt is in order in list, from small to big, so
			 * the later must not be signal either */
			break;
		}
	}

	spin_unlock(&syncobj->pt_lock);
}
307 /**
308  * drm_syncobj_replace_fence - replace fence in a sync object.
309  * @syncobj: Sync object to replace fence in
310  * @point: timeline point
311  * @fence: fence to install in sync file.
312  *
313  * This replaces the fence on a sync object, or a timeline point fence.
314  */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
			       u64 point,
			       struct dma_fence *fence)
{
	u64 pt_value = point;

	drm_syncobj_garbage_collection(syncobj);
	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
		/* a NULL fence resets a binary syncobj to "unsignaled" */
		if (!fence) {
			drm_syncobj_fini(syncobj);
			drm_syncobj_init(syncobj);
			return;
		}
		/* binary points advance implicitly, one step per fence */
		pt_value = syncobj->signal_point +
			DRM_SYNCOBJ_BINARY_POINT;
	}
	drm_syncobj_create_signal_pt(syncobj, fence, pt_value);
	if (fence) {
		struct drm_syncobj_cb *cur, *tmp;
		LIST_HEAD(cb_list);

		/* dispatch all callbacks queued while no fence existed */
		mutex_lock(&syncobj->cb_mutex);
		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
			list_del_init(&cur->node);
			cur->func(syncobj, cur);
		}
		mutex_unlock(&syncobj->cb_mutex);
	}
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
345 
346 static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
347 {
348 	struct drm_syncobj_stub_fence *fence;
349 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
350 	if (fence == NULL)
351 		return -ENOMEM;
352 
353 	spin_lock_init(&fence->lock);
354 	dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
355 		       &fence->lock, 0, 0);
356 	dma_fence_signal(&fence->base);
357 
358 	drm_syncobj_replace_fence(syncobj, 0, &fence->base);
359 
360 	dma_fence_put(&fence->base);
361 
362 	return 0;
363 }
364 
/* Resolve @point to a referenced fence in *@fence.  With
 * WAIT_FOR_SUBMIT, first sleep (interruptibly) until a point >= @point
 * has been submitted; otherwise an unsubmitted point yields -EINVAL.
 */
static int
drm_syncobj_point_get(struct drm_syncobj *syncobj, u64 point, u64 flags,
		      struct dma_fence **fence)
{
	int ret = 0;

	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		ret = wait_event_interruptible(syncobj->wq,
					       point <= syncobj->signal_point);
		if (ret < 0)
			return ret;
	}
	spin_lock(&syncobj->pt_lock);
	*fence = drm_syncobj_find_signal_pt_for_point(syncobj, point);
	if (!*fence)
		ret = -EINVAL;
	spin_unlock(&syncobj->pt_lock);
	return ret;
}
384 
385 /**
386  * drm_syncobj_search_fence - lookup and reference the fence in a sync object or
387  * in a timeline point
388  * @syncobj: sync object pointer
389  * @point: timeline point
390  * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
391  * @fence: out parameter for the fence
392  *
393  * if flags is DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, the function will block
394  * here until specific timeline points is reached.
395  * if not, you need a submit thread and block in userspace until all future
396  * timeline points have materialized, only then you can submit to the kernel,
397  * otherwise, function will fail to return fence.
398  *
399  * Returns 0 on success or a negative error value on failure. On success @fence
400  * contains a reference to the fence, which must be released by calling
401  * dma_fence_put().
402  */
int drm_syncobj_search_fence(struct drm_syncobj *syncobj, u64 point,
			     u64 flags, struct dma_fence **fence)
{
	u64 pt_value = point;

	if (!syncobj)
		return -ENOENT;

	/* free already-signaled points before the lookup */
	drm_syncobj_garbage_collection(syncobj);
	if (syncobj->type == DRM_SYNCOBJ_TYPE_BINARY) {
		/*BINARY syncobj always wait on last pt */
		pt_value = syncobj->signal_point;

		/* nothing submitted yet: wait for the first point */
		if (pt_value == 0)
			pt_value += DRM_SYNCOBJ_BINARY_POINT;
	}
	return drm_syncobj_point_get(syncobj, pt_value, flags, fence);
}
EXPORT_SYMBOL(drm_syncobj_search_fence);
422 
423 /**
424  * drm_syncobj_find_fence - lookup and reference the fence in a sync object
425  * @file_private: drm file private pointer
426  * @handle: sync object handle to lookup.
427  * @point: timeline point
428  * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
429  * @fence: out parameter for the fence
430  *
431  * This is just a convenience function that combines drm_syncobj_find() and
432  * drm_syncobj_lookup_fence().
433  *
434  * Returns 0 on success or a negative error value on failure. On success @fence
435  * contains a reference to the fence, which must be released by calling
436  * dma_fence_put().
437  */
438 int drm_syncobj_find_fence(struct drm_file *file_private,
439 			   u32 handle, u64 point, u64 flags,
440 			   struct dma_fence **fence)
441 {
442 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
443 	int ret;
444 
445 	ret = drm_syncobj_search_fence(syncobj, point, flags, fence);
446 	if (syncobj)
447 		drm_syncobj_put(syncobj);
448 	return ret;
449 }
450 EXPORT_SYMBOL(drm_syncobj_find_fence);
451 
452 /**
453  * drm_syncobj_free - free a sync object.
454  * @kref: kref to free.
455  *
456  * Only to be called from kref_put in drm_syncobj_put.
457  */
458 void drm_syncobj_free(struct kref *kref)
459 {
460 	struct drm_syncobj *syncobj = container_of(kref,
461 						   struct drm_syncobj,
462 						   refcount);
463 	drm_syncobj_fini(syncobj);
464 	kfree(syncobj);
465 }
466 EXPORT_SYMBOL(drm_syncobj_free);
467 
468 /**
469  * drm_syncobj_create - create a new syncobj
470  * @out_syncobj: returned syncobj
471  * @flags: DRM_SYNCOBJ_* flags
472  * @fence: if non-NULL, the syncobj will represent this fence
473  *
474  * This is the first function to create a sync object. After creating, drivers
475  * probably want to make it available to userspace, either through
476  * drm_syncobj_get_handle() or drm_syncobj_get_fd().
477  *
478  * Returns 0 on success or a negative error value on failure.
479  */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
		       struct dma_fence *fence)
{
	int ret;
	struct drm_syncobj *syncobj;

	syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
	if (!syncobj)
		return -ENOMEM;

	kref_init(&syncobj->refcount);
	INIT_LIST_HEAD(&syncobj->cb_list);
	spin_lock_init(&syncobj->pt_lock);
	mutex_init(&syncobj->cb_mutex);
	if (flags & DRM_SYNCOBJ_CREATE_TYPE_TIMELINE)
		syncobj->type = DRM_SYNCOBJ_TYPE_TIMELINE;
	else
		syncobj->type = DRM_SYNCOBJ_TYPE_BINARY;
	drm_syncobj_init(syncobj);

	/* start out signaled if requested, via a stub fence */
	if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
		ret = drm_syncobj_assign_null_handle(syncobj);
		if (ret < 0) {
			drm_syncobj_put(syncobj);
			return ret;
		}
	}

	/* the syncobj takes its own reference on @fence */
	if (fence)
		drm_syncobj_replace_fence(syncobj, 0, fence);

	*out_syncobj = syncobj;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
515 
516 /**
517  * drm_syncobj_get_handle - get a handle from a syncobj
518  * @file_private: drm file private pointer
519  * @syncobj: Sync object to export
520  * @handle: out parameter with the new handle
521  *
522  * Exports a sync object created with drm_syncobj_create() as a handle on
523  * @file_private to userspace.
524  *
525  * Returns 0 on success or a negative error value on failure.
526  */
int drm_syncobj_get_handle(struct drm_file *file_private,
			   struct drm_syncobj *syncobj, u32 *handle)
{
	int ret;

	/* take a reference to put in the idr */
	drm_syncobj_get(syncobj);

	/* preload so the allocation inside the spinlock can be atomic */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);

	idr_preload_end();

	if (ret < 0) {
		drm_syncobj_put(syncobj);
		return ret;
	}

	*handle = ret;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);
551 
552 static int drm_syncobj_create_as_handle(struct drm_file *file_private,
553 					u32 *handle, uint32_t flags)
554 {
555 	int ret;
556 	struct drm_syncobj *syncobj;
557 
558 	ret = drm_syncobj_create(&syncobj, flags, NULL);
559 	if (ret)
560 		return ret;
561 
562 	ret = drm_syncobj_get_handle(file_private, syncobj, handle);
563 	drm_syncobj_put(syncobj);
564 	return ret;
565 }
566 
567 static int drm_syncobj_destroy(struct drm_file *file_private,
568 			       u32 handle)
569 {
570 	struct drm_syncobj *syncobj;
571 
572 	spin_lock(&file_private->syncobj_table_lock);
573 	syncobj = idr_remove(&file_private->syncobj_idr, handle);
574 	spin_unlock(&file_private->syncobj_table_lock);
575 
576 	if (!syncobj)
577 		return -EINVAL;
578 
579 	drm_syncobj_put(syncobj);
580 	return 0;
581 }
582 
583 static int drm_syncobj_file_release(struct inode *inode, struct file *file)
584 {
585 	struct drm_syncobj *syncobj = file->private_data;
586 
587 	drm_syncobj_put(syncobj);
588 	return 0;
589 }
590 
/* fops for the anonymous inode backing an exported syncobj fd. */
static const struct file_operations drm_syncobj_file_fops = {
	.release = drm_syncobj_file_release,
};
594 
595 /**
596  * drm_syncobj_get_fd - get a file descriptor from a syncobj
597  * @syncobj: Sync object to export
598  * @p_fd: out parameter with the new file descriptor
599  *
600  * Exports a sync object created with drm_syncobj_create() as a file descriptor.
601  *
602  * Returns 0 on success or a negative error value on failure.
603  */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("syncobj_file",
				  &drm_syncobj_file_fops,
				  syncobj, 0);
	if (IS_ERR(file)) {
		/* fd was never installed, so release it directly */
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	/* reference for the file; dropped in drm_syncobj_file_release() */
	drm_syncobj_get(syncobj);
	fd_install(fd, file);

	*p_fd = fd;
	return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);
628 
629 static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
630 				    u32 handle, int *p_fd)
631 {
632 	struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
633 	int ret;
634 
635 	if (!syncobj)
636 		return -EINVAL;
637 
638 	ret = drm_syncobj_get_fd(syncobj, p_fd);
639 	drm_syncobj_put(syncobj);
640 	return ret;
641 }
642 
/* Import a previously exported syncobj fd as a new handle on
 * @file_private.  Rejects fds that are not syncobj files.
 */
static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
				    int fd, u32 *handle)
{
	struct drm_syncobj *syncobj;
	struct file *file;
	int ret;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	/* only accept fds created by drm_syncobj_get_fd() */
	if (file->f_op != &drm_syncobj_file_fops) {
		fput(file);
		return -EINVAL;
	}

	/* take a reference to put in the idr */
	syncobj = file->private_data;
	drm_syncobj_get(syncobj);

	idr_preload(GFP_KERNEL);
	spin_lock(&file_private->syncobj_table_lock);
	ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
	spin_unlock(&file_private->syncobj_table_lock);
	idr_preload_end();

	/* idr_alloc() starts at 1, so any success is > 0 */
	if (ret > 0) {
		*handle = ret;
		ret = 0;
	} else
		drm_syncobj_put(syncobj);

	fput(file);
	return ret;
}
678 
679 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
680 					      int fd, int handle)
681 {
682 	struct dma_fence *fence = sync_file_get_fence(fd);
683 	struct drm_syncobj *syncobj;
684 
685 	if (!fence)
686 		return -EINVAL;
687 
688 	syncobj = drm_syncobj_find(file_private, handle);
689 	if (!syncobj) {
690 		dma_fence_put(fence);
691 		return -ENOENT;
692 	}
693 
694 	drm_syncobj_replace_fence(syncobj, 0, fence);
695 	dma_fence_put(fence);
696 	drm_syncobj_put(syncobj);
697 	return 0;
698 }
699 
/* Export the current fence of the syncobj identified by @handle as a
 * sync_file fd.  Fails with -EINVAL if sync_file creation fails or
 * (via drm_syncobj_find_fence) if no fence is set.
 */
static int drm_syncobj_export_sync_file(struct drm_file *file_private,
					int handle, int *p_fd)
{
	int ret;
	struct dma_fence *fence;
	struct sync_file *sync_file;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;

	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
	if (ret)
		goto err_put_fd;

	/* sync_file takes its own fence reference */
	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -EINVAL;
		goto err_put_fd;
	}

	fd_install(fd, sync_file->file);

	*p_fd = fd;
	return 0;
err_put_fd:
	put_unused_fd(fd);
	return ret;
}
732 /**
733  * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
734  * @file_private: drm file-private structure to set up
735  *
736  * Called at device open time, sets up the structure for handling refcounting
737  * of sync objects.
738  */
void
drm_syncobj_open(struct drm_file *file_private)
{
	/* base 1: handle 0 stays invalid */
	idr_init_base(&file_private->syncobj_idr, 1);
	spin_lock_init(&file_private->syncobj_table_lock);
}
745 
/* idr_for_each() callback at filp close: drop the table's reference. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *obj = ptr;

	drm_syncobj_put(obj);

	return 0;
}
754 
755 /**
756  * drm_syncobj_release - release file-private sync object resources
757  * @file_private: drm file-private structure to clean up
758  *
759  * Called at close time when the filp is going away.
760  *
761  * Releases any remaining references on objects by this filp.
762  */
void
drm_syncobj_release(struct drm_file *file_private)
{
	/* drop each handle's reference, then tear down the idr itself */
	idr_for_each(&file_private->syncobj_idr,
		     &drm_syncobj_release_handle, file_private);
	idr_destroy(&file_private->syncobj_idr);
}
770 
771 int
772 drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
773 			 struct drm_file *file_private)
774 {
775 	struct drm_syncobj_create *args = data;
776 
777 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
778 		return -EOPNOTSUPP;
779 
780 	/* no valid flags yet */
781 	if (args->flags & ~(DRM_SYNCOBJ_CREATE_SIGNALED |
782 			    DRM_SYNCOBJ_CREATE_TYPE_TIMELINE))
783 		return -EINVAL;
784 
785 	return drm_syncobj_create_as_handle(file_private,
786 					    &args->handle, args->flags);
787 }
788 
789 int
790 drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
791 			  struct drm_file *file_private)
792 {
793 	struct drm_syncobj_destroy *args = data;
794 
795 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
796 		return -EOPNOTSUPP;
797 
798 	/* make sure padding is empty */
799 	if (args->pad)
800 		return -EINVAL;
801 	return drm_syncobj_destroy(file_private, args->handle);
802 }
803 
int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	/* only zero or the single export flag is accepted */
	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return -EINVAL;

	/* export the current fence as a sync_file fd ... */
	if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
		return drm_syncobj_export_sync_file(file_private, args->handle,
						    &args->fd);

	/* ... or the syncobj itself as an opaque fd */
	return drm_syncobj_handle_to_fd(file_private, args->handle,
					&args->fd);
}
827 
int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_private)
{
	struct drm_syncobj_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		return -EOPNOTSUPP;

	if (args->pad)
		return -EINVAL;

	/* only zero or the single import flag is accepted */
	if (args->flags != 0 &&
	    args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return -EINVAL;

	/* install a sync_file's fence into an existing syncobj ... */
	if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
		return drm_syncobj_import_sync_file_fence(file_private,
							  args->fd,
							  args->handle);

	/* ... or import an exported syncobj fd as a new handle */
	return drm_syncobj_fd_to_handle(file_private, args->fd,
					&args->handle);
}
852 
/* Per-syncobj bookkeeping for one waiter in the WAIT ioctl. */
struct syncobj_wait_entry {
	struct task_struct *task;	/* task to wake on progress */
	struct dma_fence *fence;	/* resolved fence, NULL until submitted */
	struct dma_fence_cb fence_cb;	/* fires when the fence signals */
	struct drm_syncobj_cb syncobj_cb; /* fires when a fence is attached */
};
859 
860 static void syncobj_wait_fence_func(struct dma_fence *fence,
861 				    struct dma_fence_cb *cb)
862 {
863 	struct syncobj_wait_entry *wait =
864 		container_of(cb, struct syncobj_wait_entry, fence_cb);
865 
866 	wake_up_process(wait->task);
867 }
868 
/* Syncobj callback (invoked under cb_mutex from
 * drm_syncobj_replace_fence()): a fence was just attached; fetch it
 * and wake the waiter so it can re-evaluate.
 */
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
				      struct drm_syncobj_cb *cb)
{
	struct syncobj_wait_entry *wait =
		container_of(cb, struct syncobj_wait_entry, syncobj_cb);

	/* grab a reference on the newly attached fence */
	drm_syncobj_search_fence(syncobj, 0, 0, &wait->fence);

	wake_up_process(wait->task);
}
879 
/* Core wait loop for the WAIT ioctl.  Waits on @count syncobjs for up
 * to @timeout jiffies; with WAIT_ALL all must signal, otherwise the
 * first one to signal ends the wait and its index is stored in *@idx.
 * Returns remaining jiffies on success, or -EINVAL / -ETIME /
 * -ERESTARTSYS / -ENOMEM.
 */
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
						  uint32_t count,
						  uint32_t flags,
						  signed long timeout,
						  uint32_t *idx)
{
	struct syncobj_wait_entry *entries;
	struct dma_fence *fence;
	uint32_t signaled_count, i;

	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* Walk the list of sync objects and initialize entries.  We do
	 * this up-front so that we can properly return -EINVAL if there is
	 * a syncobj with a missing fence and then never have the chance of
	 * returning -EINVAL again.
	 */
	signaled_count = 0;
	for (i = 0; i < count; ++i) {
		entries[i].task = current;
		drm_syncobj_search_fence(syncobjs[i], 0, 0,
					 &entries[i].fence);
		if (!entries[i].fence) {
			/* no fence yet: only tolerable with WAIT_FOR_SUBMIT */
			if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
				continue;
			} else {
				timeout = -EINVAL;
				goto cleanup_entries;
			}
		}

		if (dma_fence_is_signaled(entries[i].fence)) {
			if (signaled_count == 0 && idx)
				*idx = i;
			signaled_count++;
		}
	}

	/* fast path: the wait condition is already satisfied */
	if (signaled_count == count ||
	    (signaled_count > 0 &&
	     !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
		goto cleanup_entries;

	/* There's a very annoying laxness in the dma_fence API here, in
	 * that backends are not required to automatically report when a
	 * fence is signaled prior to fence->ops->enable_signaling() being
	 * called.  So here if we fail to match signaled_count, we need to
	 * fallthough and try a 0 timeout wait!
	 */

	/* register to be notified when missing fences get attached */
	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
		for (i = 0; i < count; ++i) {
			if (entries[i].fence)
				continue;

			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
							      &entries[i].fence,
							      &entries[i].syncobj_cb,
							      syncobj_wait_syncobj_func);
		}
	}

	/* sleep/poll loop; woken by the fence and syncobj callbacks */
	do {
		set_current_state(TASK_INTERRUPTIBLE);

		signaled_count = 0;
		for (i = 0; i < count; ++i) {
			fence = entries[i].fence;
			if (!fence)
				continue;

			/* dma_fence_add_callback() failing means the
			 * fence is already signaled
			 */
			if (dma_fence_is_signaled(fence) ||
			    (!entries[i].fence_cb.func &&
			     dma_fence_add_callback(fence,
						    &entries[i].fence_cb,
						    syncobj_wait_fence_func))) {
				/* The fence has been signaled */
				if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
					signaled_count++;
				} else {
					if (idx)
						*idx = i;
					goto done_waiting;
				}
			}
		}

		if (signaled_count == count)
			goto done_waiting;

		if (timeout == 0) {
			timeout = -ETIME;
			goto done_waiting;
		}

		if (signal_pending(current)) {
			timeout = -ERESTARTSYS;
			goto done_waiting;
		}

		timeout = schedule_timeout(timeout);
	} while (1);

done_waiting:
	__set_current_state(TASK_RUNNING);

cleanup_entries:
	for (i = 0; i < count; ++i) {
		if (entries[i].syncobj_cb.func)
			drm_syncobj_remove_callback(syncobjs[i],
						    &entries[i].syncobj_cb);
		if (entries[i].fence_cb.func)
			dma_fence_remove_callback(entries[i].fence,
						  &entries[i].fence_cb);
		dma_fence_put(entries[i].fence);
	}
	kfree(entries);

	return timeout;
}
1002 
1003 /**
1004  * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
1005  *
1006  * @timeout_nsec: timeout nsec component in ns, 0 for poll
1007  *
1008  * Calculate the timeout in jiffies from an absolute time in sec/nsec.
1009  */
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
	ktime_t abs_timeout, now;
	u64 timeout_ns, timeout_jiffies64;

	/* make 0 timeout means poll - absolute 0 doesn't seem valid */
	if (timeout_nsec == 0)
		return 0;

	abs_timeout = ns_to_ktime(timeout_nsec);
	now = ktime_get();

	/* deadline already passed: degrade to a poll */
	if (!ktime_after(abs_timeout, now))
		return 0;

	timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

	timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
	/*  clamp timeout to avoid infinite timeout */
	if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
		return MAX_SCHEDULE_TIMEOUT - 1;

	/* +1 jiffy so rounding down can't make us return early */
	return timeout_jiffies64 + 1;
}
1034 
1035 static int drm_syncobj_array_wait(struct drm_device *dev,
1036 				  struct drm_file *file_private,
1037 				  struct drm_syncobj_wait *wait,
1038 				  struct drm_syncobj **syncobjs)
1039 {
1040 	signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
1041 	uint32_t first = ~0;
1042 
1043 	timeout = drm_syncobj_array_wait_timeout(syncobjs,
1044 						 wait->count_handles,
1045 						 wait->flags,
1046 						 timeout, &first);
1047 	if (timeout < 0)
1048 		return timeout;
1049 
1050 	wait->first_signaled = first;
1051 	return 0;
1052 }
1053 
/* Copy @count_handles handles from userspace and resolve each to a
 * referenced syncobj.  On success *@syncobjs_out (kmalloc'd) holds one
 * reference per entry; release with drm_syncobj_array_free().
 */
static int drm_syncobj_array_find(struct drm_file *file_private,
				  void __user *user_handles,
				  uint32_t count_handles,
				  struct drm_syncobj ***syncobjs_out)
{
	uint32_t i, *handles;
	struct drm_syncobj **syncobjs;
	int ret;

	handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
	if (handles == NULL)
		return -ENOMEM;

	if (copy_from_user(handles, user_handles,
			   sizeof(uint32_t) * count_handles)) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
	if (syncobjs == NULL) {
		ret = -ENOMEM;
		goto err_free_handles;
	}

	for (i = 0; i < count_handles; i++) {
		syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
		if (!syncobjs[i]) {
			ret = -ENOENT;
			goto err_put_syncobjs;
		}
	}

	kfree(handles);
	*syncobjs_out = syncobjs;
	return 0;

err_put_syncobjs:
	/* drop only the references taken so far (entries 0 .. i-1) */
	while (i-- > 0)
		drm_syncobj_put(syncobjs[i]);
	kfree(syncobjs);
err_free_handles:
	kfree(handles);

	return ret;
}
1100 
1101 static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
1102 				   uint32_t count)
1103 {
1104 	uint32_t i;
1105 	for (i = 0; i < count; i++)
1106 		drm_syncobj_put(syncobjs[i]);
1107 	kfree(syncobjs);
1108 }
1109 
1110 int
1111 drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
1112 		       struct drm_file *file_private)
1113 {
1114 	struct drm_syncobj_wait *args = data;
1115 	struct drm_syncobj **syncobjs;
1116 	int ret = 0;
1117 
1118 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1119 		return -EOPNOTSUPP;
1120 
1121 	if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
1122 			    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
1123 		return -EINVAL;
1124 
1125 	if (args->count_handles == 0)
1126 		return -EINVAL;
1127 
1128 	ret = drm_syncobj_array_find(file_private,
1129 				     u64_to_user_ptr(args->handles),
1130 				     args->count_handles,
1131 				     &syncobjs);
1132 	if (ret < 0)
1133 		return ret;
1134 
1135 	ret = drm_syncobj_array_wait(dev, file_private,
1136 				     args, syncobjs);
1137 
1138 	drm_syncobj_array_free(syncobjs, args->count_handles);
1139 
1140 	return ret;
1141 }
1142 
1143 int
1144 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1145 			struct drm_file *file_private)
1146 {
1147 	struct drm_syncobj_array *args = data;
1148 	struct drm_syncobj **syncobjs;
1149 	uint32_t i;
1150 	int ret;
1151 
1152 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1153 		return -EOPNOTSUPP;
1154 
1155 	if (args->pad != 0)
1156 		return -EINVAL;
1157 
1158 	if (args->count_handles == 0)
1159 		return -EINVAL;
1160 
1161 	ret = drm_syncobj_array_find(file_private,
1162 				     u64_to_user_ptr(args->handles),
1163 				     args->count_handles,
1164 				     &syncobjs);
1165 	if (ret < 0)
1166 		return ret;
1167 
1168 	for (i = 0; i < args->count_handles; i++) {
1169 		drm_syncobj_fini(syncobjs[i]);
1170 		drm_syncobj_init(syncobjs[i]);
1171 	}
1172 	drm_syncobj_array_free(syncobjs, args->count_handles);
1173 
1174 	return ret;
1175 }
1176 
1177 int
1178 drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1179 			 struct drm_file *file_private)
1180 {
1181 	struct drm_syncobj_array *args = data;
1182 	struct drm_syncobj **syncobjs;
1183 	uint32_t i;
1184 	int ret;
1185 
1186 	if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1187 		return -EOPNOTSUPP;
1188 
1189 	if (args->pad != 0)
1190 		return -EINVAL;
1191 
1192 	if (args->count_handles == 0)
1193 		return -EINVAL;
1194 
1195 	ret = drm_syncobj_array_find(file_private,
1196 				     u64_to_user_ptr(args->handles),
1197 				     args->count_handles,
1198 				     &syncobjs);
1199 	if (ret < 0)
1200 		return ret;
1201 
1202 	for (i = 0; i < args->count_handles; i++) {
1203 		ret = drm_syncobj_assign_null_handle(syncobjs[i]);
1204 		if (ret < 0)
1205 			break;
1206 	}
1207 
1208 	drm_syncobj_array_free(syncobjs, args->count_handles);
1209 
1210 	return ret;
1211 }
1212