xref: /openbmc/linux/drivers/gpu/drm/drm_gem.c (revision b627b4ed)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing those is left up
 * to the driver, and the general API provides only generic operations:
 * allocating objects, reading/writing data with the CPU, and freeing
 * objects.  Even there, platform-dependent optimizations for reading/writing
 * data with the CPU mean we'll likely hook those out to driver-specific
 * calls.  However, the DRI2 implementation wants to have at least
 * allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and to implement the fd syscalls we need
 * as ioctls.  The objects themselves still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
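
/*
 * To make the handle/reference model above concrete, here is a minimal
 * sketch of the lifecycle a driver's "create buffer" ioctl might follow.
 * It is illustrative only; the args structure and error values are
 * hypothetical, not defined by this file:
 *
 *	struct drm_gem_object *obj;
 *	int handle, ret;
 *
 *	obj = drm_gem_object_alloc(dev, args->size);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	(the handle now holds a reference, so the allocation's own
 *	 reference can be dropped)
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */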

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
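
/*
 * As a worked example, assuming the common 4K page size (PAGE_SHIFT == 12):
 * 0xFFFFFFFFUL >> 12 is 0xFFFFF, so the fake offsets start at page
 * 0x100000 -- a byte offset of 4GB, above anything a legitimate 32-bit
 * map offset would use -- and span roughly 16 million pages (about 64GB)
 * of offset space for buffer objects.
 */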

/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);
	atomic_set(&dev->object_count, 0);
	atomic_set(&dev->object_memory, 0);
	atomic_set(&dev->pin_count, 0);
	atomic_set(&dev->pin_memory, 0);
	atomic_set(&dev->gtt_count, 0);
	atomic_set(&dev->gtt_memory, 0);

	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
	dev->mm_private = NULL;
}

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp)) {
		kfree(obj);
		return NULL;
	}

	kref_init(&obj->refcount);
	kref_init(&obj->handlecount);
	obj->size = size;
	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		fput(obj->filp);
		kfree(obj);
		return NULL;
	}
	atomic_inc(&dev->object_count);
	atomic_add(obj->size, &dev->object_memory);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
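
/*
 * A driver will typically wrap this allocator and hang its own state off
 * the object via driver_private.  A minimal sketch, assuming a
 * hypothetical per-object driver structure (the foo_* names below are
 * illustrative, not from this file):
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object *base;
 *		u32 tiling_mode;
 *	};
 *
 *	obj = drm_gem_object_alloc(dev, roundup(size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	foo->base = obj;
 *	obj->driver_private = foo;
 *
 * Note that the size must be page-aligned, or the BUG_ON above fires.
 */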

/**
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will typically want to drop their own reference to the object once
 * the handle exists, since the handle holds a reference of its own.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       int *handlep)
{
	int	ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
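
/*
 * The idr_pre_get()/idr_get_new_above() pair above is the classic
 * two-step idr allocation idiom: preallocate memory outside the spinlock,
 * then do the actual allocation under the lock, retrying from the top if
 * another thread consumed the preallocated node (-EAGAIN).  Allocating
 * handles starting at 1 rather than 0 keeps 0 free as an invalid-handle
 * sentinel for userspace.
 */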

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      int handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
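
/*
 * The reference returned by the lookup must be dropped when the caller is
 * done with the object.  The typical ioctl-handler pattern, as used by
 * drm_gem_flink_ioctl() below (sketch only):
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -EBADF;
 *	(operate on the object here)
 *	mutex_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	mutex_unlock(&dev->struct_mutex);
 */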

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not itself keep the object alive: the name
 * table's reference is dropped when the last handle is closed, so once
 * the object is freed the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	int handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
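
/*
 * Together, flink and open give two processes a way to share a buffer.
 * A userspace sketch (fd_a and fd_b are two clients' DRM fds; error
 * handling and the transport for the name are omitted):
 *
 *	struct drm_gem_flink flink = { .handle = handle_a };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	(flink.name is passed to the second process somehow)
 *
 *	struct drm_gem_open op = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);
 *	(op.handle now names the same object in fd_b's handle space,
 *	 and op.size reports the object's size)
 */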

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	mutex_lock(&dev->struct_mutex);
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_destroy(&file_private->object_idr);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);

	fput(obj->filp);
	atomic_dec(&dev->object_count);
	atomic_sub(obj->size, &dev->object_memory);
	kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
void
drm_gem_object_handle_free(struct kref *kref)
{
	struct drm_gem_object *obj = container_of(kref,
						  struct drm_gem_object,
						  handlecount);
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 */
		drm_gem_object_unreference(obj);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
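
/*
 * These two helpers are meant to be plugged into a driver's
 * vm_operations_struct so that every mapping of an object holds a
 * reference on it.  A sketch, assuming a driver-supplied fault handler
 * named foo_gem_fault (hypothetical name):
 *
 *	static struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 * The driver points its gem_vm_ops field at this table so that
 * drm_gem_mmap() below can install it on the vma.
 */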

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	/* Map the object write-combined rather than cached. */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
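
/*
 * A driver enables all of the above by routing mmap on its device node
 * here and filling in the GEM hooks.  A sketch of the relevant fields
 * (foo_driver and the foo_gem_* functions are hypothetical; only the
 * GEM-related hooks are shown):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.gem_init_object = foo_gem_init_object,
 *		.gem_free_object = foo_gem_free_object,
 *		.gem_vm_ops = &foo_gem_vm_ops,
 *		.fops = {
 *			.owner = THIS_MODULE,
 *			.mmap = drm_gem_mmap,
 *		},
 *	};
 */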
559