xref: /openbmc/linux/drivers/gpu/drm/drm_gem.c (revision afc98d90)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked-up offset will fit.
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
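
/*
 * Illustrative arithmetic (a sketch, not compiled): assuming 4 KiB pages
 * (PAGE_SHIFT == 12) on a 64-bit kernel, the range above works out to
 *
 *	DRM_FILE_PAGE_OFFSET_START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages
 *				     (i.e. fake offsets begin at 4 GiB)
 *	DRM_FILE_PAGE_OFFSET_SIZE  = 0xFFFFF * 16 = ~16M pages (~64 GiB)
 *
 * so the fake offsets start above anything a 32-bit byte offset could
 * name, and the whole range still fits comfortably in an unsigned long.
 */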

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
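
/*
 * Example (illustrative sketch, not part of this file): a driver typically
 * embeds struct drm_gem_object in its own buffer type and initializes it
 * with drm_gem_object_init(); "foo_bo" and "foo_create_object" are
 * hypothetical names.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		// driver-private state follows
 *	};
 *
 *	static struct foo_bo *foo_create_object(struct drm_device *dev,
 *						size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		size = roundup(size, PAGE_SIZE);  // size must be page-aligned
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, size);
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */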

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Remove the handle mapping; the handle's reference on the object
	 * is dropped below. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will usually want to drop their local reference to the object
 * afterwards, leaving the handle as the sole owner.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
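
/*
 * Example (illustrative sketch): a GEM_CREATE-style ioctl pairs object
 * creation with drm_gem_handle_create() and then drops its creation
 * reference, leaving the handle as the sole owner; "foo_create_object"
 * is the hypothetical helper sketched above.
 *
 *	struct foo_bo *bo = foo_create_object(dev, args->size);
 *	u32 handle;
 *	int ret;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	// drop the creation reference; the handle now keeps the object alive
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */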

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
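
/*
 * Example (illustrative sketch): a dumb_map_offset-style ioctl allocates
 * the fake offset and hands it back to userspace for use as the mmap(2)
 * offset; error handling is trimmed for brevity.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */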

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue with drivers that require
		 * buffer memory in the low 4GB: if the pages are unpinned
		 * and swapped out, they can end up swapped back in above
		 * 4GB.  If the pages are already in memory, then
		 * shmem_read_mapping_page_gfp() will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
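
/*
 * Example (illustrative sketch): pin and release the shmem backing pages
 * around a driver operation such as building a scatterlist.  Passing
 * dirty/accessed as true on release tells the VM the pages may have
 * changed and were recently used.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj, GFP_KERNEL);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	// ... map or DMA to the pages ...
 *
 *	drm_gem_put_pages(obj, pages, true, true);
 */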

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
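
/*
 * Example (illustrative sketch): the usual driver ioctl pattern looks the
 * object up by handle, which takes a reference, and drops that reference
 * when done.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *
 *	// ... operate on obj ...
 *
 *	drm_gem_object_unreference_unlocked(obj);
 */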

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
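
/*
 * Example (illustrative sketch, userspace): flink and open pair up to
 * share a buffer between two DRM file descriptors; the structs and ioctl
 * numbers come from the drm uapi headers.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	struct drm_gem_open op;
 *	int ret;
 *
 *	ret = ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// on success, flink.name can be passed to another process
 *
 *	memset(&op, 0, sizeof(op));
 *	op.name = flink.name;
 *	ret = ioctl(other_fd, DRM_IOCTL_GEM_OPEN, &op);
 *	// on success, op.handle and op.size are valid in other_fd's
 *	// handle space
 */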

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release resources bound to a GEM object
 * @obj: GEM buffer object
 *
 * Releases what drm_gem_object_init() (or drm_gem_private_object_init())
 * set up, i.e. drops the reference on the shmem backing file, if any.
 * Drivers should call this from their gem_free_object callback before
 * freeing the object.
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called while holding dev->struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
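
/*
 * Example (illustrative sketch): a driver's gem_free_object hook typically
 * releases the common GEM state via drm_gem_object_release() before
 * freeing its own wrapper; "foo_bo" is the hypothetical type sketched
 * earlier.
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_free_mmap_offset(obj);	// if an offset was created
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */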

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function must be called with dev->struct_mutex held.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
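
/*
 * Example (illustrative sketch): a PRIME/dma-buf mmap implementation can
 * route through drm_gem_mmap_obj() once access has been verified, taking
 * struct_mutex as required above; "foo" names are hypothetical.
 *
 *	static int foo_gem_prime_mmap(struct drm_gem_object *obj,
 *				      struct vm_area_struct *vma)
 *	{
 *		int ret;
 *
 *		mutex_lock(&obj->dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&obj->dev->struct_mutex);
 *
 *		return ret;
 *	}
 */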

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
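
/*
 * Example (illustrative sketch): drivers normally wire drm_gem_mmap() into
 * their file_operations, and drm_gem_vm_open()/drm_gem_vm_close() into the
 * vm_operations_struct they expose through gem_vm_ops; "foo" names are
 * hypothetical.
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,		// driver-specific fault handler
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 */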
847