/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

/*
 * dma-buf get_uuid callback: report the host-assigned UUID for an exported
 * virtio-gpu buffer object.
 *
 * Sleeps on vgdev->resp_wq until uuid_state leaves UUID_INITIALIZING
 * (presumably flipped by the response handler for the RESOURCE_ASSIGN_UUID
 * command issued at export time — confirm against virtgpu_vq.c).
 *
 * Returns 0 and copies the UUID into @uuid on success, or -ENODEV when the
 * UUID was never assigned (host lacks the feature, or assignment failed).
 */
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	/* Block until the assign-uuid handshake has settled one way or the other. */
	wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
	if (bo->uuid_state != UUID_INITIALIZED)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

/*
 * dma-buf ops for buffers exported by virtio-gpu.  The inner .ops are the
 * stock drm_gem PRIME helpers except for .attach, which goes through
 * virtio_dma_buf_attach; the GEM-level attach is supplied via
 * .device_attach instead.  virtgpu_gem_prime_import() below also compares
 * against &virtgpu_dmabuf_ops.ops to recognize self-exported buffers.
 */
const struct virtio_dma_buf_ops virtgpu_dmabuf_ops =  {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = drm_gem_map_dma_buf,
		.unmap_dma_buf = drm_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

/*
 * PRIME export hook: wrap a virtio-gpu GEM object in a dma-buf.
 *
 * When the host advertises RESOURCE_ASSIGN_UUID, queue a command asking it
 * to attach a UUID to the resource (retrieved later via the get_uuid op
 * above); otherwise mark uuid_state failed up front so get_uuid returns
 * -ENODEV without blocking.
 *
 * Returns the new dma-buf, or an ERR_PTR on allocation/command/export
 * failure.  On success an extra reference is taken on both @obj and its
 * drm_device; these are presumably dropped by drm_gem_dmabuf_release when
 * the dma-buf dies — confirm against the drm_prime helpers.
 */
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_object_array *objs;
	int ret = 0;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (vgdev->has_resource_assign_uuid) {
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return ERR_PTR(-ENOMEM);
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		/*
		 * NOTE(review): no explicit free of @objs on failure here —
		 * presumably virtio_gpu_cmd_resource_assign_uuid() consumes
		 * the array on both paths; verify in virtgpu_vq.c.
		 */
		ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
		if (ret)
			return ERR_PTR(ret);
		virtio_gpu_notify(vgdev);
	} else {
		/* No host support: let get_uuid fail fast instead of waiting. */
		bo->uuid_state = UUID_INITIALIZATION_FAILED;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	/* Keep device and object alive for the lifetime of the dma-buf. */
	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

/*
 * PRIME import hook.  A dma-buf that we ourselves exported on the same
 * device is short-circuited back to its underlying GEM object; anything
 * else falls through to the generic drm_gem_prime_import() path.
 */
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct drm_gem_object *obj;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, buf);
}

/*
 * Importing foreign sg-tables is not supported by this driver; the generic
 * import path above will therefore reject cross-device dma-bufs with
 * -ENODEV.
 */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}