// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 */

#include <linux/dma-fence-unwrap.h>
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

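/*
 * One timeline point to signal on an out-syncobj once the host fence
 * completes. @chain is preallocated for timeline syncobjs and consumed
 * by drm_syncobj_add_point().
 */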
struct virtio_gpu_submit_post_dep {
	struct drm_syncobj *syncobj;
	struct dma_fence_chain *chain;
	u64 point;
};

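/* Per-ioctl state carried through the VIRTGPU_EXECBUFFER submission path. */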
struct virtio_gpu_submit {
	struct virtio_gpu_submit_post_dep *post_deps;
	unsigned int num_out_syncobjs;

	struct drm_syncobj **in_syncobjs;
	unsigned int num_in_syncobjs;

	struct virtio_gpu_object_array *buflist;
	struct drm_virtgpu_execbuffer *exbuf;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_fpriv *vfpriv;
	struct virtio_gpu_device *vgdev;
	struct sync_file *sync_file;
	struct drm_file *file;
	int out_fence_fd;
	u64 fence_ctx;
	u32 ring_idx;
	void *buf;
};

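/*
 * Skip waiting on fences that belong to this submit's own fence context;
 * the host completes them in submission order, so only foreign fences
 * need an explicit CPU-side wait.
 */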
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
				    struct dma_fence *in_fence)
{
	u64 context = submit->fence_ctx + submit->ring_idx;

	if (dma_fence_match_context(in_fence, context))
		return 0;

	return dma_fence_wait(in_fence, true);
}

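/*
 * A sync-file or syncobj fence may be a container (array or chain) of
 * fences; unwrap it and wait on each component fence individually.
 */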
static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
				     struct dma_fence *fence)
{
	struct dma_fence_unwrap itr;
	struct dma_fence *f;
	int err;

	dma_fence_unwrap_for_each(f, &itr, fence) {
		err = virtio_gpu_do_fence_wait(submit, f);
		if (err)
			return err;
	}

	return 0;
}

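/* Drop the syncobj references taken by virtio_gpu_parse_deps() and free the array. */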
static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
				     u32 nr_syncobjs)
{
	u32 i = nr_syncobjs;

	while (i--) {
		if (syncobjs[i])
			drm_syncobj_put(syncobjs[i]);
	}

	kvfree(syncobjs);
}

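/*
 * Copy the in-syncobj descriptors from userspace, wait on their fences on
 * the CPU, and keep a reference to every syncobj that userspace asked to
 * have reset after the waits.
 */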
static int
virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
	size_t syncobj_stride = exbuf->syncobj_stride;
	u32 num_in_syncobjs = exbuf->num_in_syncobjs;
	struct drm_syncobj **syncobjs;
	int ret = 0, i;

	if (!num_in_syncobjs)
		return 0;

	/*
	 * kvcalloc() first tries to allocate memory using kmalloc and falls
	 * back to vmalloc only on failure. It also uses __GFP_NOWARN
	 * internally for allocations larger than a page, preventing a storm
	 * of kmsg warnings.
	 */
	syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
	if (!syncobjs)
		return -ENOMEM;

	for (i = 0; i < num_in_syncobjs; i++) {
		u64 address = exbuf->in_syncobjs + i * syncobj_stride;
		struct dma_fence *fence;

		memset(&syncobj_desc, 0, sizeof(syncobj_desc));

		if (copy_from_user(&syncobj_desc,
				   u64_to_user_ptr(address),
				   min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
			ret = -EINVAL;
			break;
		}

		ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
					     syncobj_desc.point, 0, &fence);
		if (ret)
			break;

		ret = virtio_gpu_dma_fence_wait(submit, fence);

		dma_fence_put(fence);
		if (ret)
			break;

		if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
			syncobjs[i] = drm_syncobj_find(submit->file,
						       syncobj_desc.handle);
			if (!syncobjs[i]) {
				ret = -EINVAL;
				break;
			}
		}
	}

	if (ret) {
		virtio_gpu_free_syncobjs(syncobjs, i);
		return ret;
	}

	submit->num_in_syncobjs = num_in_syncobjs;
	submit->in_syncobjs = syncobjs;

	return ret;
}

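/* Reset every in-syncobj that was flagged with VIRTGPU_EXECBUF_SYNCOBJ_RESET. */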
static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
				      u32 nr_syncobjs)
{
	u32 i;

	for (i = 0; i < nr_syncobjs; i++) {
		if (syncobjs[i])
			drm_syncobj_replace_fence(syncobjs[i], NULL);
	}
}

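/* Undo virtio_gpu_parse_post_deps(): free unused chain nodes and drop syncobj refs. */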
static void
virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
			  u32 nr_syncobjs)
{
	u32 i = nr_syncobjs;

	while (i--) {
		kfree(post_deps[i].chain);
		drm_syncobj_put(post_deps[i].syncobj);
	}

	kvfree(post_deps);
}

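/*
 * Copy the out-syncobj descriptors from userspace, take a reference on each
 * syncobj and preallocate a fence-chain node for every timeline (non-zero)
 * point, so that signalling after the submission cannot fail on allocation.
 */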
static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
	struct virtio_gpu_submit_post_dep *post_deps;
	u32 num_out_syncobjs = exbuf->num_out_syncobjs;
	size_t syncobj_stride = exbuf->syncobj_stride;
	int ret = 0, i;

	if (!num_out_syncobjs)
		return 0;

	post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
	if (!post_deps)
		return -ENOMEM;

	for (i = 0; i < num_out_syncobjs; i++) {
		u64 address = exbuf->out_syncobjs + i * syncobj_stride;

		memset(&syncobj_desc, 0, sizeof(syncobj_desc));

		if (copy_from_user(&syncobj_desc,
				   u64_to_user_ptr(address),
				   min(syncobj_stride, sizeof(syncobj_desc)))) {
			ret = -EFAULT;
			break;
		}

		post_deps[i].point = syncobj_desc.point;

		if (syncobj_desc.flags) {
			ret = -EINVAL;
			break;
		}

		if (syncobj_desc.point) {
			post_deps[i].chain = dma_fence_chain_alloc();
			if (!post_deps[i].chain) {
				ret = -ENOMEM;
				break;
			}
		}

		post_deps[i].syncobj = drm_syncobj_find(submit->file,
							syncobj_desc.handle);
		if (!post_deps[i].syncobj) {
			kfree(post_deps[i].chain);
			ret = -EINVAL;
			break;
		}
	}

	if (ret) {
		virtio_gpu_free_post_deps(post_deps, i);
		return ret;
	}

	submit->num_out_syncobjs = num_out_syncobjs;
	submit->post_deps = post_deps;

	return 0;
}

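/*
 * Attach the submit's out-fence to every out-syncobj: as a new timeline
 * point for syncobjs with a non-zero point, or by replacing the fence of
 * binary syncobjs.
 */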
static void
virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
{
	struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;

	if (post_deps) {
		struct dma_fence *fence = &submit->out_fence->f;
		u32 i;

		for (i = 0; i < submit->num_out_syncobjs; i++) {
			if (post_deps[i].chain) {
				drm_syncobj_add_point(post_deps[i].syncobj,
						      post_deps[i].chain,
						      fence, post_deps[i].point);
				post_deps[i].chain = NULL;
			} else {
				drm_syncobj_replace_fence(post_deps[i].syncobj,
							  fence);
			}
		}
	}
}

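/*
 * Reserve a VIRTGPU_EVENT_FENCE_SIGNALED event on the DRM file so that
 * userspace can poll for completion of the fence on this ring.
 */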
static int virtio_gpu_fence_event_create(struct drm_device *dev,
					 struct drm_file *file,
					 struct virtio_gpu_fence *fence,
					 u32 ring_idx)
{
	struct virtio_gpu_fence_event *e = NULL;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
	e->event.length = sizeof(e->event);

	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	fence->e = e;

	return 0;
}

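/*
 * Copy the BO handle array from userspace and resolve it into a
 * virtio_gpu_object_array, taking a reference on each GEM object for the
 * lifetime of the submission.
 */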
static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
{
	struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
	u32 *bo_handles;

	if (!exbuf->num_bo_handles)
		return 0;

	bo_handles = kvmalloc_array(exbuf->num_bo_handles, sizeof(*bo_handles),
				    GFP_KERNEL);
	if (!bo_handles)
		return -ENOMEM;

	if (copy_from_user(bo_handles, u64_to_user_ptr(exbuf->bo_handles),
			   exbuf->num_bo_handles * sizeof(*bo_handles))) {
		kvfree(bo_handles);
		return -EFAULT;
	}

	submit->buflist = virtio_gpu_array_from_handles(submit->file, bo_handles,
							exbuf->num_bo_handles);
	if (!submit->buflist) {
		kvfree(bo_handles);
		return -ENOENT;
	}

	kvfree(bo_handles);

	return 0;
}

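/*
 * Release everything the submit context still owns. After a successful
 * submission, virtio_gpu_complete_submit() has already transferred
 * ownership of the buffer, buflist, sync file and fence FD, so those
 * branches become no-ops.
 */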
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
	virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
	virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
	virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);

	if (!IS_ERR(submit->buf))
		kvfree(submit->buf);

	if (submit->buflist)
		virtio_gpu_array_put_free(submit->buflist);

	if (submit->out_fence_fd >= 0)
		put_unused_fd(submit->out_fence_fd);

	if (submit->out_fence)
		dma_fence_put(&submit->out_fence->f);

	if (submit->sync_file)
		fput(submit->sync_file->file);
}

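/* Queue the command buffer on the host and kick the virtqueue. */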
static void virtio_gpu_submit(struct virtio_gpu_submit *submit)
{
	virtio_gpu_cmd_submit(submit->vgdev, submit->buf, submit->exbuf->size,
			      submit->vfpriv->ctx_id, submit->buflist,
			      submit->out_fence);
	virtio_gpu_notify(submit->vgdev);
}

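/*
 * Ownership of the command buffer, buflist, sync file and fence FD has
 * been handed over (to virtio or to userspace); clear the pointers so
 * virtio_gpu_cleanup_submit() won't release them.
 */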
static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
{
	submit->buf = NULL;
	submit->buflist = NULL;
	submit->sync_file = NULL;
	submit->out_fence_fd = -1;
}

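/*
 * Set up the submit context: allocate the out-fence if anything will
 * consume it, reserve the optional DRM fence event, pin the BO list, copy
 * in the command buffer and prepare the optional out-fence FD.
 */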
static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
				  struct drm_virtgpu_execbuffer *exbuf,
				  struct drm_device *dev,
				  struct drm_file *file,
				  u64 fence_ctx, u32 ring_idx)
{
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fence *out_fence;
	bool drm_fence_event;
	int err;

	memset(submit, 0, sizeof(*submit));

	if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) &&
	    (vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
		drm_fence_event = true;
	else
		drm_fence_event = false;

	if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
	    exbuf->num_out_syncobjs ||
	    exbuf->num_bo_handles ||
	    drm_fence_event)
		out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
	else
		out_fence = NULL;

	if (drm_fence_event) {
		err = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
		if (err) {
			dma_fence_put(&out_fence->f);
			return err;
		}
	}

	submit->out_fence = out_fence;
	submit->fence_ctx = fence_ctx;
	submit->ring_idx = ring_idx;
	submit->out_fence_fd = -1;
	submit->vfpriv = vfpriv;
	submit->vgdev = vgdev;
	submit->exbuf = exbuf;
	submit->file = file;

	err = virtio_gpu_init_submit_buflist(submit);
	if (err)
		return err;

	submit->buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(submit->buf))
		return PTR_ERR(submit->buf);

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		err = get_unused_fd_flags(O_CLOEXEC);
		if (err < 0)
			return err;

		submit->out_fence_fd = err;

		submit->sync_file = sync_file_create(&out_fence->f);
		if (!submit->sync_file)
			return -ENOMEM;
	}

	return 0;
}

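/* Wait on the optional in-fence FD; same-context fences are skipped. */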
static int virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit)
{
	int ret = 0;

	if (submit->exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence =
				sync_file_get_fence(submit->exbuf->fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the
		 * fence array contains any fence from a foreign context.
		 */
		ret = virtio_gpu_dma_fence_wait(submit, in_fence);

		dma_fence_put(in_fence);
	}

	return ret;
}

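/* Publish the out-fence to userspace by installing the sync-file FD. */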
static void virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit)
{
	if (submit->sync_file) {
		submit->exbuf->fence_fd = submit->out_fence_fd;
		fd_install(submit->out_fence_fd, submit->sync_file->file);
	}
}

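/* Lock the reservation objects of all BOs referenced by the submission. */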
static int virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit)
{
	if (submit->buflist)
		return virtio_gpu_array_lock_resv(submit->buflist);

	return 0;
}

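/*
 * VIRTGPU_EXECBUFFER ioctl: validate the request, build the submit context,
 * resolve sync dependencies and hand the command buffer to the host,
 * signalling completion through syncobjs, a sync-file FD and/or a DRM event.
 */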
int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	u64 fence_ctx = vgdev->fence_drv.context;
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_submit submit;
	u32 ring_idx = 0;
	int ret = -EINVAL;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return ret;

	if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
		if (exbuf->ring_idx >= vfpriv->num_rings)
			return ret;

		if (!vfpriv->base_fence_ctx)
			return ret;

		fence_ctx = vfpriv->base_fence_ctx;
		ring_idx = exbuf->ring_idx;
	}

	virtio_gpu_create_context(dev, file);

	ret = virtio_gpu_init_submit(&submit, exbuf, dev, file,
				     fence_ctx, ring_idx);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_parse_post_deps(&submit);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_parse_deps(&submit);
	if (ret)
		goto cleanup;

	/*
	 * Await in-fences at the end of the job-submission path, so that
	 * after the waits we proceed directly to submitting to virtio.
	 */
	ret = virtio_gpu_wait_in_fence(&submit);
	if (ret)
		goto cleanup;

	ret = virtio_gpu_lock_buflist(&submit);
	if (ret)
		goto cleanup;

	virtio_gpu_submit(&submit);

	/*
	 * Set up the user-visible out data (fence FD, syncobj points) only
	 * after submitting the job, keeping the submission path itself short.
	 */
	virtio_gpu_install_out_fence_fd(&submit);
	virtio_gpu_process_post_deps(&submit);
	virtio_gpu_complete_submit(&submit);
cleanup:
	virtio_gpu_cleanup_submit(&submit);

	return ret;
}