/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

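/*
 * Lazily create the per-open-file rendering context on the host.  The
 * context is created at most once per DRM file (guarded by context_lock)
 * and is tagged with the current task's comm string to ease debugging.
 */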
static void virtio_gpu_create_context(struct drm_device *dev,
				      struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	virtio_gpu_notify(vgdev);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

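/*
 * VIRTGPU_MAP: hand back the fake mmap offset for a GEM object so that
 * userspace can mmap() the object's backing pages through the DRM file.
 */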
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
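/*
 * A rough userspace sketch of the submission path (illustrative only,
 * error handling omitted; struct fields are from <drm/virtgpu_drm.h>):
 *
 *	struct drm_virtgpu_execbuffer eb = {
 *		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.command = (uintptr_t)cmd_buf,
 *		.size = cmd_size,
 *		.bo_handles = (uintptr_t)handles,
 *		.num_bo_handles = num_handles,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
 *
 * On success, eb.fence_fd holds the out-fence when FENCE_FD_OUT is set.
 */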
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			/* unwind the reservation locks taken above, not just the memdup */
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

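/*
 * VIRTGPU_GETPARAM: report driver capabilities to userspace, currently
 * whether virgl 3D commands are supported and whether the capset query
 * fix is present.
 */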
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

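/*
 * VIRTGPU_RESOURCE_CREATE: allocate a host resource backed by a guest GEM
 * object.  With virgl 3D the full set of 3D resource parameters is
 * forwarded to the host; in 2D mode only plain 2D resources are accepted.
 */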
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		/* without virgl 3D, only plain 2D resources are supported */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single-page object when userspace passes size 0 */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

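/*
 * VIRTGPU_RESOURCE_INFO: look up a GEM handle and return the object's size
 * and its host resource handle.
 */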
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

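/*
 * VIRTGPU_TRANSFER_FROM_HOST: copy data from the host resource into the
 * guest object.  Only available with virgl 3D; the transfer is synchronized
 * through the object's reservation and a driver fence.
 */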
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

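/*
 * VIRTGPU_TRANSFER_TO_HOST: copy data from the guest object to the host
 * resource.  In 2D mode the transfer is issued without a context or fence;
 * with virgl 3D it goes through the per-file context and is fenced.
 */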
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

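/*
 * VIRTGPU_WAIT: wait (or poll, with VIRTGPU_WAIT_NOWAIT) for the fences
 * attached to a GEM object's reservation object, with a 15 second timeout.
 */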
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);
	return ret;
}

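/*
 * VIRTGPU_GET_CAPS: return a capability set to userspace.  Cached responses
 * are reused; otherwise the host is queried and the caller waits (up to 5
 * seconds) for the response before copying it out.
 */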
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid read must be ordered before the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

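/* All virtio-gpu ioctls are available on render nodes (DRM_RENDER_ALLOW). */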
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfers async to the main ring? - not sure, can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};