xref: /openbmc/linux/drivers/gpu/drm/vgem/vgem_drv.c (revision ed84ef1c)
/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/*
 * This is vgem, a (non-hardware-backed) GEM service.  It is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>

#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static const struct drm_gem_object_funcs vgem_gem_object_funcs;

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

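/*
 * Free a vgem object: drop its page array and pages_lock, detach any
 * imported dma-buf, then release the underlying GEM object.
 */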
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

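/*
 * Fault handler for mmap()ed vgem objects.  If the backing pages are
 * currently pinned, return a reference to the already-present page;
 * otherwise pull the page in from shmem and translate errors into the
 * nearest VM_FAULT_* code.
 */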
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}
	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

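/* Per-file setup: allocate the vgem_file and initialise its fence state. */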
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

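/* Per-file teardown: release the fence state and free the vgem_file. */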
static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

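/*
 * Allocate a bare vgem object of at least @size bytes (rounded up to a
 * whole number of pages), without creating a userspace handle for it.
 */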
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->base.funcs = &vgem_gem_object_funcs;

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

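/*
 * Create a vgem object together with a userspace handle.  On success the
 * object is returned with its creation reference still held; the caller
 * drops that reference when done (the handle keeps one of its own).
 */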
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	if (ret) {
		drm_gem_object_put(&obj->base);
		return ERR_PTR(ret);
	}

	return &obj->base;
}

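/*
 * Dumb-buffer hook: derive pitch and size from width/height/bpp and back
 * the buffer with an ordinary (shmem-backed) vgem object.
 */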
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	drm_gem_object_put(gem_object);

	DRM_DEBUG("Created object of size %llu\n", args->size);

	return 0;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};

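/*
 * mmap() of the vgem device fd: let drm_gem_mmap() do the setup, then
 * restore the original vm_flags (plus VM_DONTEXPAND | VM_DONTDUMP),
 * since vgem objects are backed by ordinary pages rather than special
 * PFN mappings.
 */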
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mapping set up by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct file_operations vgem_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= vgem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.release	= drm_release,
};

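/*
 * Pin the object's backing pages, allocating them from shmem on first use.
 * Pins nest: only the first pin populates bo->pages, and only the matching
 * last unpin in vgem_unpin_pages() releases them again.
 */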
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

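/*
 * PRIME export hooks: pinning also flushes the CPU caches so importers
 * can rely on coherent access through the exported addresses.
 */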
static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
}

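/*
 * Import a foreign dma-buf, attaching through vgem's platform device so
 * the attachment uses the DMA mask configured in vgem_init().
 */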
static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

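/*
 * Wrap an imported sg_table in a vgem object.  The page array is filled
 * from the sg list and marked perma-pinned, as the exporter owns the
 * backing storage.
 */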
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
	return &obj->base;
}

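/*
 * dma-buf vmap: map the whole object into the kernel as one contiguous,
 * write-combining range.  The pages stay pinned until vgem_prime_vunmap().
 */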
static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr) {
		/* Don't leak the pin taken above if the vmap() itself fails */
		vgem_unpin_pages(bo);
		return -ENOMEM;
	}
	dma_buf_map_set_vaddr(map, vaddr);

	return 0;
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(map->vaddr);
	vgem_unpin_pages(bo);
}

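/*
 * mmap() of an exported dma-buf: delegate to the object's shmem file and
 * force a write-combining, non-expandable mapping.
 */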
static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
	.free = vgem_gem_free_object,
	.pin = vgem_prime_pin,
	.unpin = vgem_prime_unpin,
	.get_sg_table = vgem_prime_get_sg_table,
	.vmap = vgem_prime_vmap,
	.vunmap = vgem_prime_vunmap,
	.vm_ops = &vgem_gem_vm_ops,
};

static const struct drm_driver vgem_driver = {
	.driver_features		= DRIVER_GEM | DRIVER_RENDER,
	.open				= vgem_open,
	.postclose			= vgem_postclose,
	.ioctls				= vgem_ioctls,
	.num_ioctls			= ARRAY_SIZE(vgem_ioctls),
	.fops				= &vgem_driver_fops,

	.dumb_create			= vgem_gem_dumb_create,

	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_import		= vgem_prime_import,
	.gem_prime_import_sg_table	= vgem_prime_import_sg_table,
	.gem_prime_mmap			= vgem_prime_mmap,

	.name	= DRIVER_NAME,
	.desc	= DRIVER_DESC,
	.date	= DRIVER_DATE,
	.major	= DRIVER_MAJOR,
	.minor	= DRIVER_MINOR,
};

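/*
 * Module load: register a dummy platform device to provide a struct
 * device with a 64-bit DMA mask, then allocate and register the DRM
 * device on top of it.
 */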
static int __init vgem_init(void)
{
	int ret;
	struct platform_device *pdev;

	pdev = platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_unregister;
	}

	dma_coerce_mask_and_coherent(&pdev->dev,
				     DMA_BIT_MASK(64));

	vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver,
					 struct vgem_device, drm);
	if (IS_ERR(vgem_device)) {
		ret = PTR_ERR(vgem_device);
		goto out_devres;
	}
	vgem_device->platform = pdev;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_devres;

	return 0;

out_devres:
	devres_release_group(&pdev->dev, NULL);
out_unregister:
	platform_device_unregister(pdev);
	return ret;
}

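/* Module unload: tear everything down in the reverse order of vgem_init(). */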
static void __exit vgem_exit(void)
{
	struct platform_device *pdev = vgem_device->platform;

	drm_dev_unregister(&vgem_device->drm);
	devres_release_group(&pdev->dev, NULL);
	platform_device_unregister(pdev);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");