/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
				    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
				    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	dma_fence_put(&out_fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

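/*
 * VIRTGPU_GETPARAM: report whether an optional host feature (3D, blob
 * resources, host-visible memory, cross-device sharing) is available.
 * The answer is copied back to userspace as a single int.
 */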
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	case VIRTGPU_PARAM_RESOURCE_BLOB:
		value = vgdev->has_resource_blob ? 1 : 0;
		break;
	case VIRTGPU_PARAM_HOST_VISIBLE:
		value = vgdev->has_host_visible ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CROSS_DEVICE:
		value = vgdev->has_resource_assign_uuid ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	if (qobj->host3d_blob || qobj->guest_blob)
		ri->blob_mem = qobj->blob_mem;

	drm_gem_object_put(gobj);
	return 0;
}

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

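	/* 3D transfers need a host rendering context; created lazily per file. */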
	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
		ret = -EINVAL;
		goto err_put_free;
	}

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
		 args->layer_stride, &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	if (bo->guest_blob && !bo->host3d_blob) {
		ret = -EINVAL;
		goto err_put_free;
	}

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);

		if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
			ret = -EINVAL;
			goto err_put_free;
		}

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

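		/* Queue the fenced 3D transfer to the host. */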
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
			 args->stride, args->layer_stride, &args->box, objs,
			 fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);
	return ret;
}

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede the copy of the cache entry. */
	smp_rmb();

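	/* Copy at most 'size' bytes of the cached capset back to userspace. */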
	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

static int verify_blob(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_fpriv *vfpriv,
		       struct virtio_gpu_object_params *params,
		       struct drm_virtgpu_resource_create_blob *rc_blob,
		       bool *guest_blob, bool *host3d_blob)
{
	if (!vgdev->has_resource_blob)
		return -EINVAL;

	if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
	    !rc_blob->blob_flags)
		return -EINVAL;

	if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		if (!vgdev->has_resource_assign_uuid)
			return -EINVAL;
	}

	switch (rc_blob->blob_mem) {
	case VIRTGPU_BLOB_MEM_GUEST:
		*guest_blob = true;
		break;
	case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
		*guest_blob = true;
		fallthrough;
	case VIRTGPU_BLOB_MEM_HOST3D:
		*host3d_blob = true;
		break;
	default:
		return -EINVAL;
	}

	if (*host3d_blob) {
		if (!vgdev->has_virgl_3d)
			return -EINVAL;

		/* Must be dword aligned. */
		if (rc_blob->cmd_size % 4 != 0)
			return -EINVAL;

		params->ctx_id = vfpriv->ctx_id;
		params->blob_id = rc_blob->blob_id;
	} else {
		if (rc_blob->blob_id != 0)
			return -EINVAL;

		if (rc_blob->cmd_size != 0)
			return -EINVAL;
	}

	params->blob_mem = rc_blob->blob_mem;
	params->size = rc_blob->size;
	params->blob = true;
	params->blob_flags = rc_blob->blob_flags;
	return 0;
}

static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
						 void *data,
						 struct drm_file *file)
{
	int ret = 0;
	uint32_t handle = 0;
	bool guest_blob = false;
	bool host3d_blob = false;
	struct drm_gem_object *obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_resource_create_blob *rc_blob = data;

	if (verify_blob(vgdev, vfpriv, &params, rc_blob,
			&guest_blob, &host3d_blob))
		return -EINVAL;

	if (vgdev->has_virgl_3d)
		virtio_gpu_create_context(dev, file);

	if (rc_blob->cmd_size) {
		void *buf;

		buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
				  rc_blob->cmd_size);

		if (IS_ERR(buf))
			return PTR_ERR(buf);

		virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
				      vfpriv->ctx_id, NULL, NULL);
	}

	if (guest_blob)
		ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
	else if (!guest_blob && host3d_blob)
		ret = virtio_gpu_vram_create(vgdev, &params, &bo);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	bo->guest_blob = guest_blob;
	bo->host3d_blob = host3d_blob;
	bo->blob_mem = rc_blob->blob_mem;
	bo->blob_flags = rc_blob->blob_flags;

	obj = &bo->base.base;
	if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
		ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
		if (ret) {
			drm_gem_object_release(obj);
			return ret;
		}
	}

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put(obj);

	rc_blob->res_handle = bo->hw_res_handle;
	rc_blob->bo_handle = handle;

	return 0;
}

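/* Every entry below carries DRM_RENDER_ALLOW, so these ioctls are usable on render nodes. */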
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
			  virtio_gpu_resource_create_blob_ioctl,
			  DRM_RENDER_ALLOW),
};