/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and what the general API provides is kept generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
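
/*
 * Worked example of the range above (illustrative; assumes PAGE_SHIFT == 12,
 * i.e. 4 KiB pages): on 64-bit, 0xFFFFFFFFUL >> 12 == 0xFFFFF, so the fake
 * offsets start at page 0x100000 -- a byte offset of 1 << 32, i.e. 4 GiB --
 * and cover 16 * 0xFFFFF pages, roughly 64 GiB of offset space.  On 32-bit
 * the range starts at a 256 MiB byte offset and covers about 4 GiB.  Starting
 * past the low offset range keeps the fake offsets distinguishable from
 * ordinary map offsets while still fitting in an unsigned long pgoff.
 */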

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
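
/*
 * Illustrative usage (hypothetical driver code, not part of this file; the
 * foo_* names are assumptions): a driver typically embeds struct
 * drm_gem_object in its own object type and initializes it with the helper
 * above, remembering that the size must be page-aligned.
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *		// driver-private state goes here
 *	};
 *
 *	static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
 *						     size_t size)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &fobj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return fobj;
 *	}
 */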

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* drm_gem_object_init() took a reference on the shmem file;
	 * drop it so the backing store goes away with the object. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must drop the handle count under object_name_lock, as this may
	 * be the last ref, in which case the object would disappear before
	 * we checked for a name.
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards,
 * as the handle now holds one.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
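
/*
 * Typical calling sequence (illustrative sketch; obj is a freshly created
 * object from a hypothetical driver constructor): once the handle exists it
 * holds its own reference, so the creator drops the allocation reference
 * whether or not handle creation succeeded.
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &handle);
 *	// drop reference from allocate - handle holds it now
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */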

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
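
/*
 * Illustrative sketch of a ->dumb_map_offset implementation (hypothetical
 * driver code; obj has already been looked up from the handle): allocate
 * the fake offset and report it back for the subsequent mmap(2) call.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */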

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB: if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
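
/*
 * Expected pairing (illustrative sketch): a driver pins the backing store
 * before using it and releases it when done, flagging the pages dirty if
 * the device may have written to them so swap-out preserves the contents.
 *
 *	struct page **pages = drm_gem_get_pages(obj, 0);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... build an sg table, bind to the device, do work ...
 *	drm_gem_put_pages(obj, pages, true, false);
 */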

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
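
/*
 * Typical ioctl pattern (illustrative): resolve the handle, use the object,
 * then drop the reference the lookup took.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */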

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
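
/*
 * Userspace view of flink/open (illustrative sketch using the raw ioctls;
 * real clients usually go through libdrm).  Process A publishes a global
 * name for one of its handles, and process B converts that name back into
 * a local handle of its own:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	// ... pass flink.name to process B out of band ...
 *	struct drm_gem_open open_args = { .name = name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_args);
 *	// open_args.handle and open_args.size are now valid in B
 */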

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function must be called with dev->struct_mutex held.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
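
/*
 * Example use (illustrative, hypothetical driver code): a dma-buf ->mmap
 * callback can be a thin wrapper around drm_gem_mmap_obj(), since the
 * exporter has already vetted the caller.  The locking follows the NOTE
 * above; the foo_* name is an assumption.
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		int ret;
 *
 *		mutex_lock(&obj->dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&obj->dev->struct_mutex);
 *
 *		return ret;
 *	}
 */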

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
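
/*
 * Wiring it up (illustrative): GEM-based drivers typically point the mmap
 * member of their file_operations straight at this helper, e.g.
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */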
893