/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service.  It is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

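/*
 * Rough userspace sketch (not part of the driver): vgem objects are plain
 * shmem-backed GEM buffers, so a client can exercise this driver with the
 * generic dumb-buffer ioctls alone.  The device path is an assumption (the
 * actual minor depends on what else is registered); headers and error
 * handling are omitted.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);	// assume card0 is vgem
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);	// vgem_gem_dumb_create()
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);	// vgem_gem_dumb_map()
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);	// vgem_drm_gem_mmap()
 */
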
#include <linux/module.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
{
	drm_gem_put_pages(&obj->base, obj->pages, false, false);
	obj->pages = NULL;
}

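/*
 * Free a vgem object once its last reference is gone: drop the fake mmap
 * offset, release any dma-buf associated with the object, release the GEM
 * core state, then unpin the shmem pages and free the wrapper struct.
 */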
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	drm_gem_free_mmap_offset(obj);

	if (vgem_obj->use_dma_buf && obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}

	drm_gem_object_release(obj);

	if (vgem_obj->pages)
		vgem_gem_put_pages(vgem_obj);

	vgem_obj->pages = NULL;

	kfree(vgem_obj);
}

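/*
 * Pin the shmem backing pages for an object.  This is a no-op for objects
 * that already have pages or that are backed by a dma-buf.
 */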
int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
{
	struct page **pages;

	if (obj->pages || obj->use_dma_buf)
		return 0;

	pages = drm_gem_get_pages(&obj->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	obj->pages = pages;

	return 0;
}

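/*
 * Page-fault handler for mmap'ed vgem objects: translate the faulting
 * address into a page index within the object and insert the corresponding
 * shmem page into the faulting VMA.
 */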
static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->base.dev;
	loff_t num_pages;
	pgoff_t page_offset;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	/* page_offset is an index, so num_pages itself is already past the end */
	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&dev->struct_mutex);

	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     obj->pages[page_offset]);

	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	case -EBUSY:
		return VM_FAULT_RETRY;
	case -EFAULT:
	case -EINVAL:
		return VM_FAULT_SIGBUS;
	default:
		WARN_ON(1);
		return VM_FAULT_SIGBUS;
	}
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

/* ioctls */

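/*
 * Allocate a shmem-backed GEM object of the given size and create a
 * userspace handle for it.  On success the handle holds the only reference;
 * the returned pointer is only used by the caller to read back the
 * rounded-up size.
 */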
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	struct drm_gem_object *gem_object;
	int err;

	size = roundup(size, PAGE_SIZE);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	gem_object = &obj->base;

	err = drm_gem_object_init(dev, gem_object, size);
	if (err)
		goto out;

	err = drm_gem_handle_create(file, gem_object, handle);
	if (err)
		goto handle_out;

	drm_gem_object_unreference_unlocked(gem_object);

	return gem_object;

handle_out:
	drm_gem_object_release(gem_object);
out:
	kfree(obj);
	return ERR_PTR(err);
}

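/*
 * Dumb-buffer allocation entry point (DRM_IOCTL_MODE_CREATE_DUMB): compute a
 * byte pitch from width and bpp, size the buffer accordingly, and hand the
 * resulting handle, pitch and rounded-up size back to userspace.
 */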
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	uint64_t size;
	/* widen before multiplying so width * bytes-per-pixel cannot overflow 32 bits */
	uint64_t pitch = (uint64_t)args->width * DIV_ROUND_UP(args->bpp, 8);

	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);

	if (IS_ERR(gem_object)) {
		DRM_DEBUG_DRIVER("object creation failed\n");
		return PTR_ERR(gem_object);
	}

	args->size = gem_object->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %llu\n", size);

	return 0;
}

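/*
 * DRM_IOCTL_MODE_MAP_DUMB entry point: make sure the object has a fake mmap
 * offset and pinned backing pages, then report the offset userspace must
 * pass to mmap() on the DRM fd.
 */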
int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset)
{
	int ret = 0;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (!drm_vma_node_has_offset(&obj->vma_node)) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto unref;
	}

	BUG_ON(!obj->filp);

	obj->filp->private_data = obj;

	ret = vgem_gem_get_pages(to_vgem_bo(obj));
	if (ret)
		goto fail_get_pages;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	goto unref;

fail_get_pages:
	drm_gem_free_mmap_offset(obj);
unref:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

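/*
 * Custom mmap implementation (used instead of drm_gem_mmap()): look the
 * object up by its fake offset, delegate to dma_buf_mmap() for dma-buf
 * backed objects, and otherwise set the VMA up so faults are served from
 * the object's shmem pages by vgem_gem_fault().
 */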
int vgem_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;
	struct drm_gem_object *obj;
	struct drm_vgem_gem_object *vgem_obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		ret = -EINVAL;
		goto out_unlock;
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		ret = -EACCES;
		goto out_unlock;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);

	vgem_obj = to_vgem_bo(obj);

	if (obj->dma_buf && vgem_obj->use_dma_buf) {
		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
		goto out_unlock;
	}

	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = vgem_obj;
	vma->vm_page_prot =
		pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	mutex_unlock(&dev->struct_mutex);
	drm_gem_vm_open(vma);
	return ret;

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
};

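/*
 * Note that .mmap points at vgem_drm_gem_mmap() rather than the core
 * drm_gem_mmap() helper, so mappings of dma-buf backed objects can be
 * redirected to dma_buf_mmap().
 */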
static const struct file_operations vgem_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.mmap		= vgem_drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.unlocked_ioctl = drm_ioctl,
	.release	= drm_release,
};

static struct drm_driver vgem_driver = {
	.driver_features		= DRIVER_GEM,
	.gem_free_object		= vgem_gem_free_object,
	.gem_vm_ops			= &vgem_gem_vm_ops,
	.ioctls				= vgem_ioctls,
	.fops				= &vgem_driver_fops,
	.dumb_create			= vgem_gem_dumb_create,
	.dumb_map_offset		= vgem_gem_dumb_map,
	.name	= DRIVER_NAME,
	.desc	= DRIVER_DESC,
	.date	= DRIVER_DATE,
	.major	= DRIVER_MAJOR,
	.minor	= DRIVER_MINOR,
};

struct drm_device *vgem_device;

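/*
 * vgem has no hardware and therefore no bus to probe, so module init simply
 * allocates a standalone DRM device (with no parent) for the driver and
 * registers it.
 */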
static int __init vgem_init(void)
{
	int ret;

	vgem_device = drm_dev_alloc(&vgem_driver, NULL);
	if (!vgem_device) {
		ret = -ENOMEM;
		goto out;
	}

	drm_dev_set_unique(vgem_device, "vgem");

	ret = drm_dev_register(vgem_device, 0);
	if (ret)
		goto out_unref;

	return 0;

out_unref:
	drm_dev_unref(vgem_device);
out:
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(vgem_device);
	drm_dev_unref(vgem_device);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");