/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	/* Wait until the host has answered the UUID assignment request. */
	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	/* VRAM objects need their own mapping path; everything else uses
	 * the generic GEM dma-buf helper.
	 */
	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	int ret;
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);
	ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
	if (ret)
		return ret;

	return 0;
}

struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	/*
	 * Non-blob resources get a UUID assigned here, if the host supports
	 * it, so that get_uuid() can succeed later.  Blob resources only
	 * carry a UUID when created with VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE;
	 * otherwise get_uuid() will fail with -ENODEV.
	 */
	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct drm_gem_object *obj;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	return drm_gem_prime_import(dev, buf);
}

struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	/* Importing sg_tables from other devices is not supported. */
	return ERR_PTR(-ENODEV);
}