/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the initial
 * VIRTIO_GPUReleaseInfo struct (first XXX bytes)
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_unused_fd;
	}

	buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	return 0;

out_memdup:
	kfree(buf);
out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
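
/*
 * A minimal user-space sketch of driving this ioctl, assuming libdrm's
 * drmIoctl() and the uapi structures from <drm/virtgpu_drm.h>; cmd_buf,
 * cmd_size, handles and num_handles are hypothetical stand-ins for a
 * virgl command stream and BO handle list built elsewhere:
 *
 *	struct drm_virtgpu_execbuffer eb = {
 *		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size = cmd_size,
 *		.command = (uintptr_t)cmd_buf,
 *		.bo_handles = (uintptr_t)handles,
 *		.num_bo_handles = num_handles,
 *		.fence_fd = -1,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) == 0)
 *		use_or_pass_on(eb.fence_fd);	// out-fence fd installed above
 */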

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
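
/*
 * Probing for 3D support from user space is a two-line affair; a sketch,
 * assuming libdrm's drmIoctl() and an open virtio-gpu render node:
 *
 *	int have_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (uintptr_t)&have_3d,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && have_3d)
 *		...	// virgl command submission and 3D resources available
 */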

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (!vgdev->has_virgl_3d) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)	/* 2 == PIPE_TEXTURE_2D */
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	}
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	qobj = virtio_gpu_alloc_object(dev, &params, fence);
	dma_fence_put(&fence->f);
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
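
/*
 * Creating a simple 2D resource from user space might look like the sketch
 * below (drmIoctl() and the uapi header are assumed; "fmt" is a placeholder
 * for a device/virgl format id chosen elsewhere). Note the non-virgl checks
 * above: without 3D, only single-sample, single-level 2D textures pass.
 *
 *	struct drm_virtgpu_resource_create rc = {
 *		.target = 2,		// 2D texture
 *		.format = fmt,
 *		.width = 1024,
 *		.height = 768,
 *		.depth = 1,
 *		.array_size = 1,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc) == 0)
 *		...	// rc.bo_handle: GEM handle, rc.res_handle: host resource id
 */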

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	convert_to_hw_box(&box, &args->box);

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &box, objs, fence);
	dma_fence_put(&fence->f);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 box.w, box.h, box.x, box.y,
			 objs, NULL);
	} else {
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, objs, fence);
		dma_fence_put(&fence->f);
	}
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);
	return ret;
}
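
/*
 * A user-space sketch of polling vs. blocking on a BO, again assuming
 * libdrm's drmIoctl(): with VIRTGPU_WAIT_NOWAIT the ioctl returns -EBUSY
 * immediately if the BO is still busy, otherwise it blocks up to the
 * 15-second timeout above.
 *
 *	struct drm_virtgpu_3d_wait wait = {
 *		.handle = bo_handle,			// hypothetical GEM handle
 *		.flags = VIRTGPU_WAIT_NOWAIT,	// poll; drop for blocking wait
 *	};
 *	int busy = drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) != 0 &&
 *		   errno == EBUSY;
 */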

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};
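
/*
 * Fetching a capset from user space is a query/allocate/copy dance; a
 * sketch assuming drmIoctl() and a caller that knows which capset id and
 * version it wants (CAPS_BUF_SIZE is a hypothetical upper bound; the
 * kernel copies at most min(gc.size, host capset size) bytes):
 *
 *	char caps[CAPS_BUF_SIZE];
 *	struct drm_virtgpu_get_caps gc = {
 *		.cap_set_id = 1,	// VIRTIO_GPU_CAPSET_VIRGL
 *		.cap_set_ver = 1,
 *		.addr = (uintptr_t)caps,
 *		.size = sizeof(caps),
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc) == 0)
 *		parse_capset(caps);	// parse_capset is hypothetical
 */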