// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 *  Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

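/*
 * Driver-private GEM object: wraps the base DRM GEM object and keeps
 * the backing storage as a plain page array, so the pages can be
 * shared with or mapped from the backend.
 */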
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* scatter-gather table of an imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

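/*
 * Allocate only the array of struct page pointers; the pages themselves
 * are allocated (or ballooned out) by the callers below.
 */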
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

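/*
 * Called by the DRM core via drm_gem_object_funcs.mmap to map the buffer
 * into user space. All backing pages are inserted up front rather than
 * faulted in on demand.
 */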
static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would normally be called on
	 * the first CPU access to the VM area. For GPUs this isn't the case,
	 * because the CPU doesn't touch the memory. Insert all pages now, so
	 * both CPU and GPU are happy.
	 *
	 * FIXME: as all pages are inserted here, the .fault handler should
	 * never be called, so don't provide one.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}


static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

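/*
 * GEM object functions: installed into every object created by this
 * driver via gem_create_obj() below.
 */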
static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_free_object_unlocked,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

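/*
 * Allocate and initialize the driver-private GEM object, wiring up the
 * object functions defined above. Neither the pages nor the page array
 * are allocated here.
 */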
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

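/*
 * Create a buffer of the given size. Depending on the "be_alloc"
 * configuration the backing pages are either ballooned-out pages to be
 * backfilled by the backend, or normal shmem pages allocated here and
 * later shared with the backend.
 */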
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}

	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

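/* Entry point used by the frontend driver to create a dumb buffer. */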
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

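/*
 * Release the buffer: undo the PRIME import, or return the ballooned
 * pages to Xen, or put the shmem pages, depending on how the buffer
 * was created.
 */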
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

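/* Return the array of pages backing the buffer. */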
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

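/* Build a scatter-gather table from the backing pages for PRIME export. */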
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

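/*
 * Import a PRIME buffer: wrap the given sg table in a GEM object,
 * flatten it into the page array and register the buffer with the
 * backend via xen_drm_front_dbuf_create().
 */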
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

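/* Map the buffer contiguously into the kernel address space for PRIME vmap. */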
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/*
	 * Please see the comment in xen_drm_front_gem_object_mmap() on
	 * mapping and attributes.
	 */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

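/* Tear down the kernel mapping established by xen_drm_front_gem_prime_vmap(). */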
void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}