/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

/* dma-buf .get_uuid callback: hand out the UUID the host assigned. */
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
                                   uuid_t *uuid)
{
        struct drm_gem_object *obj = buf->priv;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = obj->dev->dev_private;

        /* Block until the host has acked or rejected the UUID assignment. */
        wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
        if (bo->uuid_state != STATE_OK)
                return -ENODEV;

        uuid_copy(uuid, &bo->uuid);

        return 0;
}

/* Generic GEM dma-buf ops, plus the virtio-specific attach and get_uuid. */
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
        .ops = {
                .cache_sgt_mapping = true,
                .attach = virtio_dma_buf_attach,
                .detach = drm_gem_map_detach,
                .map_dma_buf = drm_gem_map_dma_buf,
                .unmap_dma_buf = drm_gem_unmap_dma_buf,
                .release = drm_gem_dmabuf_release,
                .mmap = drm_gem_dmabuf_mmap,
                .vmap = drm_gem_dmabuf_vmap,
                .vunmap = drm_gem_dmabuf_vunmap,
        },
        .device_attach = drm_gem_map_attach,
        .get_uuid = virtgpu_virtio_get_uuid,
};

/* Ask the host to attach a UUID to @bo so other virtio devices can find it. */
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo)
{
        struct virtio_gpu_object_array *objs;

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return -ENOMEM;

        virtio_gpu_array_add_obj(objs, &bo->base.base);

        return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
                                         int flags)
{
        struct dma_buf *buf;
        struct drm_device *dev = obj->dev;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        int ret = 0;
        bool blob = bo->host3d_blob || bo->guest_blob;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        /*
         * Non-blob resources need a host-assigned UUID before they can be
         * shared.  Without host support, mark the UUID lookup as failed so
         * virtgpu_virtio_get_uuid() returns -ENODEV instead of blocking.
         */
        if (!blob) {
                if (vgdev->has_resource_assign_uuid) {
                        ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
                        if (ret)
                                return ERR_PTR(ret);

                        virtio_gpu_notify(vgdev);
                } else {
                        bo->uuid_state = STATE_ERR;
                }
        }

        exp_info.ops = &virtgpu_dmabuf_ops.ops;
        exp_info.size = obj->size;
        exp_info.flags = flags;
        exp_info.priv = obj;
        exp_info.resv = obj->resv;

        buf = virtio_dma_buf_export(&exp_info);
        if (IS_ERR(buf))
                return buf;

        /* Balanced by the puts in drm_gem_dmabuf_release(). */
        drm_dev_get(dev);
        drm_gem_object_get(obj);

        return buf;
}
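
/*
 * Illustrative sketch, not part of this driver: a peer virtio driver that
 * imported one of these dma-bufs could recover the host-visible UUID with
 * the helpers from <linux/virtio_dma_buf.h>, which dispatch to
 * virtgpu_virtio_get_uuid() above.  is_virtio_dma_buf() and
 * virtio_dma_buf_get_uuid() are the real helpers; the wrapper name below
 * is hypothetical.
 *
 *	static int example_buf_to_uuid(struct dma_buf *buf, uuid_t *uuid)
 *	{
 *		// Reject dma-bufs not exported through virtio_dma_buf_export().
 *		if (!is_virtio_dma_buf(buf))
 *			return -EINVAL;
 *		// May block until the host acks the UUID assignment.
 *		return virtio_dma_buf_get_uuid(buf, uuid);
 *	}
 */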
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
                                                struct dma_buf *buf)
{
        struct drm_gem_object *obj;

        if (buf->ops == &virtgpu_dmabuf_ops.ops) {
                obj = buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing a dma-buf exported from our own gem
                         * bumps the refcount on the gem itself instead of
                         * the dma-buf's f_count.
                         */
                        drm_gem_object_get(obj);
                        return obj;
                }
        }

        return drm_gem_prime_import(dev, buf);
}

struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
        struct drm_device *dev, struct dma_buf_attachment *attach,
        struct sg_table *table)
{
        /* virtio-gpu cannot back a GEM object with a foreign sg table. */
        return ERR_PTR(-ENODEV);
}
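
/*
 * Illustrative userspace sketch (assumes libdrm; not part of this file):
 * the export and import paths above back the PRIME ioctls, so a process
 * can turn a virtio-gpu GEM handle into a dma-buf fd and re-import it.
 * drm_fd and some_gem_handle are hypothetical.
 *
 *	#include <xf86drm.h>
 *
 *	uint32_t handle = some_gem_handle;
 *	uint32_t reimported;
 *	int prime_fd;
 *
 *	// DRM_IOCTL_PRIME_HANDLE_TO_FD ends up in virtgpu_gem_prime_export().
 *	drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd);
 *	// DRM_IOCTL_PRIME_FD_TO_HANDLE ends up in virtgpu_gem_prime_import();
 *	// a same-device import takes the fast path and reuses the same gem.
 *	drmPrimeFDToHandle(drm_fd, prime_fd, &reimported);
 */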