// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

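/* State of a single job submission, from setup through cleanup. */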
struct virtio_gpu_submit {
	struct virtio_gpu_object_array *buflist;
	struct drm_virtgpu_execbuffer *exbuf;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_fpriv *vfpriv;
	struct virtio_gpu_device *vgdev;
	struct sync_file *sync_file;
	struct drm_file *file;
	int out_fence_fd;
	u64 fence_ctx;
	u32 ring_idx;
	void *buf;
};

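/*
 * Wait for an in-fence on the CPU unless it belongs to this submission's
 * own fence context: fences on the same ring signal in submission order,
 * so no explicit wait is needed for them.
 */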
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
				    struct dma_fence *in_fence)
{
	u32 context = submit->fence_ctx + submit->ring_idx;

	if (dma_fence_match_context(in_fence, context))
		return 0;

	return dma_fence_wait(in_fence, true);
}

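/*
 * Unwrap a possibly containerized fence (e.g. a fence array behind a
 * sync_file) and wait on each component fence individually.
 */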
static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
				     struct dma_fence *fence)
{
	struct dma_fence_unwrap itr;
	struct dma_fence *f;
	int err;

	dma_fence_unwrap_for_each(f, &itr, fence) {
		err = virtio_gpu_do_fence_wait(submit, f);
		if (err)
			return err;
	}

	return 0;
}

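/*
 * Reserve a DRM event that is delivered to userspace once the given
 * out-fence signals.
 */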
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 u32 ring_idx)
{
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	fence->e = e;

	return 0;
}

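/*
 * Copy the BO handle array from userspace and resolve the handles to GEM
 * objects, taking a reference on each.
 */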
static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	u32 *bo_handles;

	if (!exbuf->num_bo_handles)
		return 0;

	bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
				    GFP_KERNEL);
	if (!bo_handles)
		return -ENOMEM;

	if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
			   exbuf->num_bo_handles * sizeof(*bo_handles))) {
		kvfree(bo_handles);
		return -EFAULT;
	}

	submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
							exbuf->num_bo_handles);
	if (!submit->buflist) {
		kvfree(bo_handles);
		return -ENOENT;
	}

	kvfree(bo_handles);

	return 0;
}

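/*
 * Release whatever part of the submission state has been set up so far.
 * Safe to call on a partially initialized submit: unused fields are
 * NULL or -1 and skipped here.
 */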
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
	if (!IS_ERR(submit->buf))
		kvfree(submit->buf);

	if (submit->buflist)
		virtio_gpu_array_put_free(submit->buflist);

	if (submit->out_fence_fd >= 0)
		put_unused_fd(submit->out_fence_fd);

	if (submit->out_fence)
		dma_fence_put(&submit->out_fence->f);

	if (submit->sync_file)
		fput(submit->sync_file->file);
}

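/* Queue the command buffer on the host and kick the notification. */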
static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
{
	virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
			      submit->vfpriv->ctx_id, submit->buflist,
			      submit->out_fence);
	virtio_gpu_notify(submit->vgdev);
}

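/*
 * Ownership of these resources was handed over by a successful
 * submission; clear the fields so that the final
 * virtio_gpu_cleanup_submit() does not release them a second time.
 */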
static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
{
	submit->buf = NULL;
	submit->buflist = NULL;
	submit->sync_file = NULL;
	submit->out_fence = NULL;
	submit->out_fence_fd = -1;
}

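/*
 * Prepare a submission: allocate the out-fence when anything will consume
 * it, reserve the fence event and out-fence fd as requested, build the BO
 * list and copy the command buffer from userspace.
 */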
static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
				  struct drm_virtgpu_execbuffer *exbuf,
				  struct drm_device *dev,
				  struct drm_file *file,
				  u64 fence_ctx, u32 ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fence *out_fence;
	bool drm_fence_event;
	int err;

	memset(submit, 0, sizeof(*submit));

	/*
	 * Mark the fd slot as unused before the first failure return, so
	 * that virtio_gpu_cleanup_submit() never calls put_unused_fd(0).
	 */
	submit->out_fence_fd = -1;

	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		drm_fence_event = true;
	else
		drm_fence_event = false;

	/*
	 * An out-fence is needed only if userspace asked for a fence fd or
	 * a fence event, or if BOs must be tracked until the job completes.
	 */
	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
	    exbuf->num_bo_handles ||
	    drm_fence_event) {
		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
		if (!out_fence)
			return -ENOMEM;
	} else {
		out_fence = NULL;
	}

	if (drm_fence_event) {
		err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
		if (err) {
			dma_fence_put(&out_fence->f);
			return err;
		}
	}

	submit->out_fence = out_fence;
	submit->fence_ctx = fence_ctx;
	submit->ring_idx = ring_idx;
	submit->vfpriv = vfpriv;
	submit->vgdev = vgdev;
	submit->exbuf = exbuf;
	submit->file = file;

	err = virtio_gpu_init_submit_buflist(submit);
	if (err)
		return err;

	submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(submit->buf))
		return PTR_ERR(submit->buf);

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		err = get_unused_fd_flags(O_CLOEXEC);
		if (err < 0)
			return err;

		submit->out_fence_fd = err;

		submit->sync_file = sync_file_create(&out_fence->f);
		if (!submit->sync_file)
			return -ENOMEM;
	}

	return 0;
}

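/* Resolve and await the optional in-fence fd passed by userspace. */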
static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
{
	int ret = 0;

	if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence =
				sync_file_get_fence(submit->exbuf->fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the
		 * fence array contains any fence from a foreign context.
		 */
		ret = virtio_gpu_dma_fence_wait(submit, in_fence);

		dma_fence_put(in_fence);
	}

	return ret;
}

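/*
 * Publish the out-fence fd to userspace.  After fd_install() the fd
 * table owns the sync-file reference.
 */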
static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
{
	if (submit->sync_file) {
		submit->exbuf->fence_fd = submit->out_fence_fd;
		fd_install(submit->out_fence_fd, submit->sync_file->file);
	}
}

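/* Lock the reservation objects of all BOs referenced by the job, if any. */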
static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
{
	if (submit->buflist)
		return virtio_gpu_array_lock_resv(submit->buflist);

	return 0;
}

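/*
 * VIRTGPU_EXECBUFFER entry point: validate the arguments, set up the
 * submission, await in-fences and submit the job to the host.
 */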
int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	u64 fence_ctx = vgdev->fence_drv.context;
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_submit submit;
	u32 ring_idx = 0;
	int ret = -EINVAL;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return ret;

	if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return ret;

		if (!vfpriv->base_fence_ctx)
			return ret;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	virtio_gpu_create_context(dev, file);

	ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
				     fence_ctx, ring_idx);
	if (ret)
		goto cleanup;

	/*
	 * Await in-fences at the end of the job submission path so that,
	 * once the waits are done, we can proceed directly to the
	 * submission to virtio.
	 */
	ret = virtio_gpu_wait_in_fence(&submit);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_lock_buflist(&submit);
	if (ret)
		goto cleanup;

	virtio_gpu_submit(&submit);

	/*
	 * Set up user-out data after submitting the job to optimize
	 * the job submission path.
	 */
	virtio_gpu_install_out_fence_fd(&submit);
	virtio_gpu_complete_submit(&submit);
cleanup:
	virtio_gpu_cleanup_submit(&submit);

	return ret;
}