/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

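/*
 * Lazily create a host rendering context for this file on first use.
 * The context is tagged with the current task's comm name to ease
 * debugging on the host side; context_lock makes creation one-shot.
 */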
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

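/* Look up a dumb buffer by handle and return its mmap offset to userspace. */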
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

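	/*
	 * Reserve the out-fence fd up front so the error paths below only
	 * ever have to undo a put_unused_fd(); the sync_file itself is
	 * created and installed once the out-fence has been allocated.
	 */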
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			/* the buflist reservation may still be held here */
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

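/* Report a driver capability (3D support, capset query fix) to userspace. */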
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

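/*
 * Create a host-side resource and a corresponding GEM object.  With
 * virgl 3D all texture parameters are passed through to the host;
 * without 3D only simple 2D resources are accepted.
 */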
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		/* without 3D only plain 2D resources are supported */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page sized object when no size is given */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

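/* Return size, hardware resource handle and blob memory type of a BO. */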
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

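/*
 * Transfer data from a host resource back into guest memory.  Only
 * supported with virgl 3D; stride arguments are only valid for
 * host3d blob resources.
 */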
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

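/*
 * Transfer data from guest memory to a host resource.  Works for plain
 * 2D resources as well; the 3D path attaches a fence so the host can
 * signal completion.
 */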
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto err_unlock;
		}

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

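/*
 * Wait (up to 15 seconds) for all pending operations on a BO to
 * complete, or just poll the current state with VIRTGPU_WAIT_NOWAIT.
 */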
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

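/*
 * Copy a cached capability set to userspace, fetching it from the
 * host first if this (id, version) pair has not been queried yet.
 */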
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

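/*
 * Validate the userspace-supplied blob parameters and translate them
 * into object creation params.  Returns -EINVAL on any inconsistency.
 */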
static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
	    !rc_blob->blob_flags)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

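/*
 * Create a blob resource: guest-backed blobs become ordinary GEM
 * objects, while host3d-only blobs are backed by host memory via
 * virtio_gpu_vram_create().  An optional command buffer is submitted
 * first so userspace can define the blob on the host side.
 */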
static int virtio_gpu_resource_create_blob(struct drm_device *dev,
					   void *data, struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL?
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob,
			  DRM_RENDER_ALLOW),
};