/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

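/*
 * Module parameter "virglhack": when non-zero (the default), resource
 * IDs come from a monotonic counter and are never reused, for
 * compatibility with old virglrenderer versions (see
 * virtio_gpu_resource_id_get() below).  Mode 0400 makes the parameter
 * read-only at runtime.
 */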
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

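/*
 * virtio_gpu_resource_id_get - allocate a resource id for a new object
 *
 * IDs are 1-based: both allocation paths below add 1 so that 0 is
 * never handed out, since the virtio-gpu protocol uses resource id 0
 * to mean "no resource".
 */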
int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

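/*
 * Release a resource id.  No-op in workaround mode, where ids come
 * from the monotonic counter and are deliberately never recycled.
 */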
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}

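/*
 * virtio_gpu_cleanup_object - free all guest-side state of an object
 *
 * For shmem objects this unmaps and releases the page list before
 * dropping the GEM object; for VRAM objects it returns the
 * host-visible window to the allocator.  Called either directly (for
 * objects the host never saw) or from the unref completion handler.
 */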
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base);
		}

		drm_gem_shmem_free(&bo->base);
	} else if (virtio_gpu_is_vram(bo)) {
		struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);

		spin_lock(&vgdev->host_visible_lock);
		if (drm_mm_node_allocated(&vram->vram_node))
			drm_mm_remove_node(&vram->vram_node);

		spin_unlock(&vgdev->host_visible_lock);

		drm_gem_free_mmap_offset(&vram->base.base.base);
		drm_gem_object_release(&vram->base.base.base);
		kfree(vram);
	}
}

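/*
 * GEM .free callback.  If the host knows about the resource, an unref
 * command must be sent first and the actual cleanup deferred to the
 * command completion handler; otherwise the object can be torn down
 * right away.
 */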
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

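/* GEM object vtable for shmem-backed (guest memory) objects. */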
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,
	.print_info = drm_gem_shmem_object_print_info,
	.export = virtgpu_gem_prime_export,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
};

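/*
 * An object is shmem-backed if and only if it uses the vtable above;
 * VRAM objects carry a different funcs table, so a pointer compare is
 * enough to tell the two apart.
 */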
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

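/*
 * Driver hook for allocating GEM objects.  Note this only allocates
 * the wrapper struct and installs the vtable: drm_gem_shmem_create()
 * calls back into this via the driver's gem_create_object hook and
 * finishes the GEM initialization itself.
 */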
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return ERR_PTR(-ENOMEM);

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	return &dshmem->base;
}

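/*
 * Pin the object's pages, build a scatterlist for them and translate
 * it into the virtio_gpu_mem_entry array the host expects.  With the
 * DMA API in use the entries carry mapped DMA addresses, otherwise
 * guest physical addresses are passed through directly.
 */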
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base);
	if (ret < 0)
		return ret;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops. This is discouraged for other drivers, but should be fine
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
	if (IS_ERR(shmem->pages)) {
		drm_gem_shmem_unpin(&bo->base);
		ret = PTR_ERR(shmem->pages);
		shmem->pages = NULL;
		return ret;
	}

	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

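/*
 * virtio_gpu_object_create - create a shmem object and announce it to
 * the host.  When @fence is non-NULL the object's reservation is held
 * locked across command submission so the fence can be attached to it.
 * On success the object is returned through @bo_ptr with the create
 * command queued (but not necessarily processed) on the host.
 */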
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		/* drop the reservation lock taken above before freeing */
		if (fence)
			virtio_gpu_array_unlock_resv(objs);
		virtio_gpu_array_put_free(objs);
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	if (params->blob) {
		if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
			bo->guest_blob = true;

		virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
						    ents, nents);
	} else if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
		virtio_gpu_object_attach(vgdev, bo, ents, nents);
	}

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free(shmem_obj);
	return ret;
}