1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/mm.h>
31 #include <linux/uaccess.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/pagemap.h>
37 #include <linux/shmem_fs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mem_encrypt.h>
40 #include <drm/drmP.h>
41 #include <drm/drm_vma_manager.h>
42 #include <drm/drm_gem.h>
43 #include <drm/drm_print.h>
44 #include "drm_internal.h"
45 
46 /** @file drm_gem.c
47  *
48  * This file provides some of the base ioctls and library routines for
49  * the graphics memory manager implemented by each device driver.
50  *
51  * Because various devices have different requirements in terms of
52  * synchronization and migration strategies, implementing that is left up to
53  * the driver, and all that the general API provides should be generic --
54  * allocating objects, reading/writing data with the CPU, freeing objects.
55  * Even there, platform-dependent optimizations for reading/writing data with
56  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
57  * the DRI2 implementation wants to have at least allocate/mmap be generic.
58  *
59  * The goal was to have swap-backed object allocation managed through
60  * struct file.  However, file descriptors as handles to a struct file have
61  * two major failings:
62  * - Process limits prevent more than 1024 or so being used at a time by
63  *   default.
64  * - Inability to allocate high fds will aggravate the X Server's select()
65  *   handling, and likely that of many GL client applications as well.
66  *
67  * This led to a plan of using our own integer IDs (called handles, following
68  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
69  * ioctls.  The objects themselves will still include the struct file so
70  * that we can transition to fds if the required kernel infrastructure shows
71  * up at a later date, and as our interface with shmfs for memory allocation.
72  */
73 
74 /*
75  * We make up offsets for buffer objects so we can recognize them at
76  * mmap time.
77  */
78 
79 /* pgoff in mmap is an unsigned long, so we need to make sure that
80  * the faked up offset will fit
81  */
82 
83 #if BITS_PER_LONG == 64
84 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
85 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
86 #else
87 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
88 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
89 #endif
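
/*
 * Worked example (assuming a 64-bit machine with 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1
 * = 0x100000 pages, so the fake offsets begin right at the 4 GiB mark, and
 * DRM_FILE_PAGE_OFFSET_SIZE spans roughly 16M pages (about 64 GiB of offset
 * space) above that, so faked-up offsets are easy to tell apart from the
 * pgoff of an ordinary file-backed mapping.
 */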
90 
91 /**
92  * drm_gem_init - Initialize the GEM device fields
93  * @dev: drm_device structure to initialize
94  */
95 int
96 drm_gem_init(struct drm_device *dev)
97 {
98 	struct drm_vma_offset_manager *vma_offset_manager;
99 
100 	mutex_init(&dev->object_name_lock);
101 	idr_init_base(&dev->object_name_idr, 1);
102 
103 	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
104 	if (!vma_offset_manager) {
105 		DRM_ERROR("out of memory\n");
106 		return -ENOMEM;
107 	}
108 
109 	dev->vma_offset_manager = vma_offset_manager;
110 	drm_vma_offset_manager_init(vma_offset_manager,
111 				    DRM_FILE_PAGE_OFFSET_START,
112 				    DRM_FILE_PAGE_OFFSET_SIZE);
113 
114 	return 0;
115 }
116 
117 void
118 drm_gem_destroy(struct drm_device *dev)
119 {
120 
121 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
122 	kfree(dev->vma_offset_manager);
123 	dev->vma_offset_manager = NULL;
124 }
125 
126 /**
127  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
128  * @dev: drm_device the object should be initialized for
129  * @obj: drm_gem_object to initialize
130  * @size: object size
131  *
132  * Initialize an already allocated GEM object of the specified size with
133  * shmfs backing store.
134  */
135 int drm_gem_object_init(struct drm_device *dev,
136 			struct drm_gem_object *obj, size_t size)
137 {
138 	struct file *filp;
139 
140 	drm_gem_private_object_init(dev, obj, size);
141 
142 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
143 	if (IS_ERR(filp))
144 		return PTR_ERR(filp);
145 
146 	obj->filp = filp;
147 
148 	return 0;
149 }
150 EXPORT_SYMBOL(drm_gem_object_init);
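
/*
 * Example: a minimal, hypothetical driver-side allocation helper built on
 * drm_gem_object_init(). The foo_* names are illustrative only; real drivers
 * add their own bookkeeping and locking.
 *
 *	struct foo_gem_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_gem_object *foo_gem_create(struct drm_device *dev,
 *						     size_t size)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		size = PAGE_ALIGN(size);	// init insists on page-aligned sizes
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (!fobj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &fobj->base, size);
 *		if (ret) {
 *			kfree(fobj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return fobj;
 *	}
 */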
151 
152 /**
153  * drm_gem_private_object_init - initialize an allocated private GEM object
154  * @dev: drm_device the object should be initialized for
155  * @obj: drm_gem_object to initialize
156  * @size: object size
157  *
158  * Initialize an already allocated GEM object of the specified size with
159  * no GEM provided backing store. Instead the caller is responsible for
160  * backing the object and handling it.
161  */
162 void drm_gem_private_object_init(struct drm_device *dev,
163 				 struct drm_gem_object *obj, size_t size)
164 {
165 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
166 
167 	obj->dev = dev;
168 	obj->filp = NULL;
169 
170 	kref_init(&obj->refcount);
171 	obj->handle_count = 0;
172 	obj->size = size;
173 	drm_vma_node_reset(&obj->vma_node);
174 }
175 EXPORT_SYMBOL(drm_gem_private_object_init);
176 
177 static void
178 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
179 {
180 	/*
181 	 * Note: obj->dma_buf can't disappear as long as we still hold a
182 	 * handle reference in obj->handle_count.
183 	 */
184 	mutex_lock(&filp->prime.lock);
185 	if (obj->dma_buf) {
186 		drm_prime_remove_buf_handle_locked(&filp->prime,
187 						   obj->dma_buf);
188 	}
189 	mutex_unlock(&filp->prime.lock);
190 }
191 
192 /**
193  * drm_gem_object_handle_free - release resources bound to userspace handles
194  * @obj: GEM object to clean up.
195  *
196  * Called after the last handle to the object has been closed.
197  *
198  * Removes any name for the object. Note that this must be
199  * called before drm_gem_object_free or we'll be touching
200  * freed memory.
201  */
202 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
203 {
204 	struct drm_device *dev = obj->dev;
205 
206 	/* Remove any name for this object */
207 	if (obj->name) {
208 		idr_remove(&dev->object_name_idr, obj->name);
209 		obj->name = 0;
210 	}
211 }
212 
213 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
214 {
215 	/* Unbreak the reference cycle if we have an exported dma_buf. */
216 	if (obj->dma_buf) {
217 		dma_buf_put(obj->dma_buf);
218 		obj->dma_buf = NULL;
219 	}
220 }
221 
222 static void
223 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
224 {
225 	struct drm_device *dev = obj->dev;
226 	bool final = false;
227 
228 	if (WARN_ON(obj->handle_count == 0))
229 		return;
230 
231 	/*
232 	 * Must bump handle count first as this may be the last
233 	 * ref, in which case the object would disappear before we
234 	 * checked for a name
235 	 */
236 
237 	mutex_lock(&dev->object_name_lock);
238 	if (--obj->handle_count == 0) {
239 		drm_gem_object_handle_free(obj);
240 		drm_gem_object_exported_dma_buf_free(obj);
241 		final = true;
242 	}
243 	mutex_unlock(&dev->object_name_lock);
244 
245 	if (final)
246 		drm_gem_object_put_unlocked(obj);
247 }
248 
249 /*
250  * Called at device or object close to release the file's
251  * handle references on objects.
252  */
253 static int
254 drm_gem_object_release_handle(int id, void *ptr, void *data)
255 {
256 	struct drm_file *file_priv = data;
257 	struct drm_gem_object *obj = ptr;
258 	struct drm_device *dev = obj->dev;
259 
260 	if (dev->driver->gem_close_object)
261 		dev->driver->gem_close_object(obj, file_priv);
262 
263 	if (drm_core_check_feature(dev, DRIVER_PRIME))
264 		drm_gem_remove_prime_handles(obj, file_priv);
265 	drm_vma_node_revoke(&obj->vma_node, file_priv);
266 
267 	drm_gem_object_handle_put_unlocked(obj);
268 
269 	return 0;
270 }
271 
272 /**
273  * drm_gem_handle_delete - deletes the given file-private handle
274  * @filp: drm file-private structure to use for the handle look up
275  * @handle: userspace handle to delete
276  *
277  * Removes the GEM handle from the @filp lookup table which has been added with
278  * drm_gem_handle_create(). If this is the last handle also cleans up linked
279  * resources like GEM names.
280  */
281 int
282 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
283 {
284 	struct drm_gem_object *obj;
285 
286 	spin_lock(&filp->table_lock);
287 
288 	/* Check if we currently have a reference on the object */
289 	obj = idr_replace(&filp->object_idr, NULL, handle);
290 	spin_unlock(&filp->table_lock);
291 	if (IS_ERR_OR_NULL(obj))
292 		return -EINVAL;
293 
294 	/* Release driver's reference and decrement refcount. */
295 	drm_gem_object_release_handle(handle, obj, filp);
296 
297 	/* And finally make the handle available for future allocations. */
298 	spin_lock(&filp->table_lock);
299 	idr_remove(&filp->object_idr, handle);
300 	spin_unlock(&filp->table_lock);
301 
302 	return 0;
303 }
304 EXPORT_SYMBOL(drm_gem_handle_delete);
305 
306 /**
307  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
308  * @file: drm file-private structure containing the gem object
309  * @dev: corresponding drm_device
310  * @handle: gem object handle
311  * @offset: return location for the fake mmap offset
312  *
313  * This implements the &drm_driver.dumb_map_offset kms driver callback for
314  * drivers which use gem to manage their backing storage.
315  *
316  * Returns:
317  * 0 on success or a negative error code on failure.
318  */
319 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
320 			    u32 handle, u64 *offset)
321 {
322 	struct drm_gem_object *obj;
323 	int ret;
324 
325 	obj = drm_gem_object_lookup(file, handle);
326 	if (!obj)
327 		return -ENOENT;
328 
329 	/* Don't allow imported objects to be mapped */
330 	if (obj->import_attach) {
331 		ret = -EINVAL;
332 		goto out;
333 	}
334 
335 	ret = drm_gem_create_mmap_offset(obj);
336 	if (ret)
337 		goto out;
338 
339 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
340 out:
341 	drm_gem_object_put_unlocked(obj);
342 
343 	return ret;
344 }
345 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
346 
347 /**
348  * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
349  * @file: drm file-private structure to remove the dumb handle from
350  * @dev: corresponding drm_device
351  * @handle: the dumb handle to remove
352  *
353  * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
354  * which use gem to manage their backing storage.
355  */
356 int drm_gem_dumb_destroy(struct drm_file *file,
357 			 struct drm_device *dev,
358 			 uint32_t handle)
359 {
360 	return drm_gem_handle_delete(file, handle);
361 }
362 EXPORT_SYMBOL(drm_gem_dumb_destroy);
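
/*
 * Example: how a hypothetical driver would plug the two dumb-buffer helpers
 * above into its &drm_driver, leaving only dumb_create itself to implement
 * (see the sketch after drm_gem_handle_create() below).
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = foo_dumb_create,		// driver-specific
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		.dumb_destroy	 = drm_gem_dumb_destroy,
 *		// ...
 *	};
 */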
363 
364 /**
365  * drm_gem_handle_create_tail - internal function to create a handle
366  * @file_priv: drm file-private structure to register the handle for
367  * @obj: object to register
368  * @handlep: pointer to return the created handle to the caller
369  *
370  * This expects the &drm_device.object_name_lock to be held already and will
371  * drop it before returning. Used to avoid races in establishing new handles
372  * when importing an object from either a flink name or a dma-buf.
373  *
374  * Handles must be released again through drm_gem_handle_delete(). This is done
375  * when userspace closes @file_priv for all attached handles, or through the
376  * GEM_CLOSE ioctl for individual handles.
377  */
378 int
379 drm_gem_handle_create_tail(struct drm_file *file_priv,
380 			   struct drm_gem_object *obj,
381 			   u32 *handlep)
382 {
383 	struct drm_device *dev = obj->dev;
384 	u32 handle;
385 	int ret;
386 
387 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
388 	if (obj->handle_count++ == 0)
389 		drm_gem_object_get(obj);
390 
391 	/*
392 	 * Get the user-visible handle using idr.  Preload and perform
393 	 * allocation under our spinlock.
394 	 */
395 	idr_preload(GFP_KERNEL);
396 	spin_lock(&file_priv->table_lock);
397 
398 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
399 
400 	spin_unlock(&file_priv->table_lock);
401 	idr_preload_end();
402 
403 	mutex_unlock(&dev->object_name_lock);
404 	if (ret < 0)
405 		goto err_unref;
406 
407 	handle = ret;
408 
409 	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
410 	if (ret)
411 		goto err_remove;
412 
413 	if (dev->driver->gem_open_object) {
414 		ret = dev->driver->gem_open_object(obj, file_priv);
415 		if (ret)
416 			goto err_revoke;
417 	}
418 
419 	*handlep = handle;
420 	return 0;
421 
422 err_revoke:
423 	drm_vma_node_revoke(&obj->vma_node, file_priv);
424 err_remove:
425 	spin_lock(&file_priv->table_lock);
426 	idr_remove(&file_priv->object_idr, handle);
427 	spin_unlock(&file_priv->table_lock);
428 err_unref:
429 	drm_gem_object_handle_put_unlocked(obj);
430 	return ret;
431 }
432 
433 /**
434  * drm_gem_handle_create - create a gem handle for an object
435  * @file_priv: drm file-private structure to register the handle for
436  * @obj: object to register
437  * @handlep: pointer to return the created handle to the caller
438  *
439  * Create a handle for this object. This adds a handle reference to the object,
440  * which includes a regular reference count. Callers will likely want to
441  * dereference the object afterwards.
442  *
443  * Since this publishes @obj to userspace it must be fully set up by this point;
444  * drivers must call this last in their buffer object creation callbacks.
445  */
446 int drm_gem_handle_create(struct drm_file *file_priv,
447 			  struct drm_gem_object *obj,
448 			  u32 *handlep)
449 {
450 	mutex_lock(&obj->dev->object_name_lock);
451 
452 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
453 }
454 EXPORT_SYMBOL(drm_gem_handle_create);
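
/*
 * Example: a hypothetical dumb_create implementation showing the ordering
 * rule above - the object is fully constructed first, drm_gem_handle_create()
 * runs last, and the creation reference is dropped once the handle owns the
 * object. foo_gem_create() is the assumed helper sketched earlier.
 *
 *	static int foo_dumb_create(struct drm_file *file_priv,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct foo_gem_object *fobj;
 *		int ret;
 *
 *		args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 *		args->size = PAGE_ALIGN((u64)args->pitch * args->height);
 *
 *		fobj = foo_gem_create(dev, args->size);
 *		if (IS_ERR(fobj))
 *			return PTR_ERR(fobj);
 *
 *		ret = drm_gem_handle_create(file_priv, &fobj->base,
 *					    &args->handle);
 *		// drop the creation reference; the handle keeps the object alive
 *		drm_gem_object_put_unlocked(&fobj->base);
 *
 *		return ret;
 *	}
 */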
455 
456 
457 /**
458  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
459  * @obj: obj in question
460  *
461  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
462  *
463  * Note that drm_gem_object_release() already calls this function, so drivers
464  * don't have to take care of releasing the mmap offset themselves when freeing
465  * the GEM object.
466  */
467 void
468 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
469 {
470 	struct drm_device *dev = obj->dev;
471 
472 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
473 }
474 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
475 
476 /**
477  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
478  * @obj: obj in question
479  * @size: the virtual size
480  *
481  * GEM memory mapping works by handing back to userspace a fake mmap offset
482  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
483  * up the object based on the offset and sets up the various memory mapping
484  * structures.
485  *
486  * This routine allocates and attaches a fake offset for @obj, in cases where
487  * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
488  * Otherwise just use drm_gem_create_mmap_offset().
489  *
490  * This function is idempotent and handles an already allocated mmap offset
491  * transparently. Drivers do not need to check for this case.
492  */
493 int
494 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
495 {
496 	struct drm_device *dev = obj->dev;
497 
498 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
499 				  size / PAGE_SIZE);
500 }
501 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
502 
503 /**
504  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
505  * @obj: obj in question
506  *
507  * GEM memory mapping works by handing back to userspace a fake mmap offset
508  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
509  * up the object based on the offset and sets up the various memory mapping
510  * structures.
511  *
512  * This routine allocates and attaches a fake offset for @obj.
513  *
514  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
515  * the fake offset again.
516  */
517 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
518 {
519 	return drm_gem_create_mmap_offset_size(obj, obj->size);
520 }
521 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
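
/*
 * Example: the userspace side of the fake offset for a dumb buffer. The
 * offset handed back by DRM_IOCTL_MODE_MAP_DUMB is the value produced by the
 * helpers above and is only meaningful as an mmap(2) offset on the DRM fd.
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	void *ptr;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
 *		return -errno;
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *	if (ptr == MAP_FAILED)
 *		return -errno;
 */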
522 
523 /**
524  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
525  * from shmem
526  * @obj: obj in question
527  *
528  * This reads the page-array of the shmem-backing storage of the given gem
529  * object. An array of pages is returned. If a page is not allocated or
530  * swapped-out, this will allocate/swap-in the required pages. Note that the
531  * whole object is covered by the page-array and pinned in memory.
532  *
533  * Use drm_gem_put_pages() to release the array and unpin all pages.
534  *
535  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
536  * If you require other GFP-masks, you have to do those allocations yourself.
537  *
538  * Note that you are not allowed to change gfp-zones during runtime. That is,
539  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
540  * set during initialization. If you have special zone constraints, set them
541  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
542  * to keep pages in the required zone during swap-in.
543  */
544 struct page **drm_gem_get_pages(struct drm_gem_object *obj)
545 {
546 	struct address_space *mapping;
547 	struct page *p, **pages;
548 	int i, npages;
549 
550 	/* This is the shared memory object that backs the GEM resource */
551 	mapping = obj->filp->f_mapping;
552 
553 	/* We already BUG_ON() for non-page-aligned sizes in
554 	 * drm_gem_object_init(), so we should never hit this unless
555 	 * the driver author is doing something really wrong:
556 	 */
557 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
558 
559 	npages = obj->size >> PAGE_SHIFT;
560 
561 	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
562 	if (pages == NULL)
563 		return ERR_PTR(-ENOMEM);
564 
565 	for (i = 0; i < npages; i++) {
566 		p = shmem_read_mapping_page(mapping, i);
567 		if (IS_ERR(p))
568 			goto fail;
569 		pages[i] = p;
570 
571 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
572 		 * correct region during swapin. Note that this requires
573 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
574 		 * so shmem can relocate pages during swapin if required.
575 		 */
576 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
577 				(page_to_pfn(p) >= 0x00100000UL));
578 	}
579 
580 	return pages;
581 
582 fail:
583 	while (i--)
584 		put_page(pages[i]);
585 
586 	kvfree(pages);
587 	return ERR_CAST(p);
588 }
589 EXPORT_SYMBOL(drm_gem_get_pages);
590 
591 /**
592  * drm_gem_put_pages - helper to free backing pages for a GEM object
593  * @obj: obj in question
594  * @pages: pages to free
595  * @dirty: if true, pages will be marked as dirty
596  * @accessed: if true, the pages will be marked as accessed
597  */
598 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
599 		bool dirty, bool accessed)
600 {
601 	int i, npages;
602 
603 	/* We already BUG_ON() for non-page-aligned sizes in
604 	 * drm_gem_object_init(), so we should never hit this unless
605 	 * the driver author is doing something really wrong:
606 	 */
607 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
608 
609 	npages = obj->size >> PAGE_SHIFT;
610 
611 	for (i = 0; i < npages; i++) {
612 		if (dirty)
613 			set_page_dirty(pages[i]);
614 
615 		if (accessed)
616 			mark_page_accessed(pages[i]);
617 
618 		/* Undo the reference we took when populating the table */
619 		put_page(pages[i]);
620 	}
621 
622 	kvfree(pages);
623 }
624 EXPORT_SYMBOL(drm_gem_put_pages);
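
/*
 * Example: a hypothetical pin/unpin pair built on the two helpers above.
 * The mapping_set_gfp_mask() call illustrates the zone note in the
 * drm_gem_get_pages() kerneldoc for a device limited to 32-bit DMA; it would
 * be done once, right after drm_gem_object_init().
 *
 *	mapping_set_gfp_mask(obj->filp->f_mapping,
 *			     GFP_USER | __GFP_DMA32);
 *
 *	static int foo_gem_pin_pages(struct foo_gem_object *fobj)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&fobj->base);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		fobj->pages = pages;	// assumed driver-private field
 *		return 0;
 *	}
 *
 *	static void foo_gem_unpin_pages(struct foo_gem_object *fobj)
 *	{
 *		// dirty + accessed so shmem writes the data back on swap-out
 *		drm_gem_put_pages(&fobj->base, fobj->pages, true, true);
 *		fobj->pages = NULL;
 *	}
 */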
625 
626 /**
627  * drm_gem_object_lookup - look up a GEM object from its handle
628  * @filp: DRM file-private data
629  * @handle: userspace handle
630  *
631  * Returns:
632  *
633  * A reference to the object named by the handle if such exists on @filp, NULL
634  * otherwise.
635  */
636 struct drm_gem_object *
637 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
638 {
639 	struct drm_gem_object *obj;
640 
641 	spin_lock(&filp->table_lock);
642 
643 	/* Check if we currently have a reference on the object */
644 	obj = idr_find(&filp->object_idr, handle);
645 	if (obj)
646 		drm_gem_object_get(obj);
647 
648 	spin_unlock(&filp->table_lock);
649 
650 	return obj;
651 }
652 EXPORT_SYMBOL(drm_gem_object_lookup);
653 
654 /**
655  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
656  * @dev: drm_device
657  * @data: ioctl data
658  * @file_priv: drm file-private structure
659  *
660  * Releases the handle to an mm object.
661  */
662 int
663 drm_gem_close_ioctl(struct drm_device *dev, void *data,
664 		    struct drm_file *file_priv)
665 {
666 	struct drm_gem_close *args = data;
667 	int ret;
668 
669 	if (!drm_core_check_feature(dev, DRIVER_GEM))
670 		return -ENODEV;
671 
672 	ret = drm_gem_handle_delete(file_priv, args->handle);
673 
674 	return ret;
675 }
676 
677 /**
678  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
679  * @dev: drm_device
680  * @data: ioctl data
681  * @file_priv: drm file-private structure
682  *
683  * Create a global name for an object, returning the name.
684  *
685  * Note that the name does not hold a reference; when the object
686  * is freed, the name goes away.
687  */
688 int
689 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
690 		    struct drm_file *file_priv)
691 {
692 	struct drm_gem_flink *args = data;
693 	struct drm_gem_object *obj;
694 	int ret;
695 
696 	if (!drm_core_check_feature(dev, DRIVER_GEM))
697 		return -ENODEV;
698 
699 	obj = drm_gem_object_lookup(file_priv, args->handle);
700 	if (obj == NULL)
701 		return -ENOENT;
702 
703 	mutex_lock(&dev->object_name_lock);
704 	/* prevent races with concurrent gem_close. */
705 	if (obj->handle_count == 0) {
706 		ret = -ENOENT;
707 		goto err;
708 	}
709 
710 	if (!obj->name) {
711 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
712 		if (ret < 0)
713 			goto err;
714 
715 		obj->name = ret;
716 	}
717 
718 	args->name = (uint64_t) obj->name;
719 	ret = 0;
720 
721 err:
722 	mutex_unlock(&dev->object_name_lock);
723 	drm_gem_object_put_unlocked(obj);
724 	return ret;
725 }
726 
727 /**
728  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
729  * @dev: drm_device
730  * @data: ioctl data
731  * @file_priv: drm file-private structure
732  *
733  * Open an object using the global name, returning a handle and the size.
734  *
735  * This handle (of course) holds a reference to the object, so the object
736  * will not go away until the handle is deleted.
737  */
738 int
739 drm_gem_open_ioctl(struct drm_device *dev, void *data,
740 		   struct drm_file *file_priv)
741 {
742 	struct drm_gem_open *args = data;
743 	struct drm_gem_object *obj;
744 	int ret;
745 	u32 handle;
746 
747 	if (!drm_core_check_feature(dev, DRIVER_GEM))
748 		return -ENODEV;
749 
750 	mutex_lock(&dev->object_name_lock);
751 	obj = idr_find(&dev->object_name_idr, (int) args->name);
752 	if (obj) {
753 		drm_gem_object_get(obj);
754 	} else {
755 		mutex_unlock(&dev->object_name_lock);
756 		return -ENOENT;
757 	}
758 
759 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
760 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
761 	drm_gem_object_put_unlocked(obj);
762 	if (ret)
763 		return ret;
764 
765 	args->handle = handle;
766 	args->size = obj->size;
767 
768 	return 0;
769 }
770 
771 /**
772  * drm_gem_open - initializes GEM file-private structures at devnode open time
773  * @dev: drm_device which is being opened by userspace
774  * @file_private: drm file-private structure to set up
775  *
776  * Called at device open time, sets up the structure for handling refcounting
777  * of mm objects.
778  */
779 void
780 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
781 {
782 	idr_init_base(&file_private->object_idr, 1);
783 	spin_lock_init(&file_private->table_lock);
784 }
785 
786 /**
787  * drm_gem_release - release file-private GEM resources
788  * @dev: drm_device which is being closed by userspace
789  * @file_private: drm file-private structure to clean up
790  *
791  * Called at close time when the filp is going away.
792  *
793  * Releases any remaining references on objects by this filp.
794  */
795 void
796 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
797 {
798 	idr_for_each(&file_private->object_idr,
799 		     &drm_gem_object_release_handle, file_private);
800 	idr_destroy(&file_private->object_idr);
801 }
802 
803 /**
804  * drm_gem_object_release - release GEM buffer object resources
805  * @obj: GEM buffer object
806  *
807  * This releases any structures and resources used by @obj and is the inverse of
808  * drm_gem_object_init().
809  */
810 void
811 drm_gem_object_release(struct drm_gem_object *obj)
812 {
813 	WARN_ON(obj->dma_buf);
814 
815 	if (obj->filp)
816 		fput(obj->filp);
817 
818 	drm_gem_free_mmap_offset(obj);
819 }
820 EXPORT_SYMBOL(drm_gem_object_release);
821 
822 /**
823  * drm_gem_object_free - free a GEM object
824  * @kref: kref of the object to free
825  *
826  * Called after the last reference to the object has been lost.
827  * Must be called holding &drm_device.struct_mutex.
828  *
829  * Frees the object.
830  */
831 void
832 drm_gem_object_free(struct kref *kref)
833 {
834 	struct drm_gem_object *obj =
835 		container_of(kref, struct drm_gem_object, refcount);
836 	struct drm_device *dev = obj->dev;
837 
838 	if (dev->driver->gem_free_object_unlocked) {
839 		dev->driver->gem_free_object_unlocked(obj);
840 	} else if (dev->driver->gem_free_object) {
841 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
842 
843 		dev->driver->gem_free_object(obj);
844 	}
845 }
846 EXPORT_SYMBOL(drm_gem_object_free);
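
/*
 * Example: a hypothetical free callback reached through the kref path above.
 * Drivers without legacy struct_mutex locking should wire up the _unlocked
 * hook; drm_gem_object_release() undoes drm_gem_object_init().
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_gem_object *fobj =
 *			container_of(obj, struct foo_gem_object, base);
 *
 *		drm_gem_object_release(obj);	// drops shmem file + mmap offset
 *		kfree(fobj);
 *	}
 *
 *	static struct drm_driver foo_driver = {
 *		// ...
 *		.gem_free_object_unlocked = foo_gem_free_object,
 *	};
 */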
847 
848 /**
849  * drm_gem_object_put_unlocked - drop a GEM buffer object reference
850  * @obj: GEM buffer object
851  *
852  * This releases a reference to @obj. Callers must not hold the
853  * &drm_device.struct_mutex lock when calling this function.
854  *
855  * See also __drm_gem_object_put().
856  */
857 void
858 drm_gem_object_put_unlocked(struct drm_gem_object *obj)
859 {
860 	struct drm_device *dev;
861 
862 	if (!obj)
863 		return;
864 
865 	dev = obj->dev;
866 
867 	if (dev->driver->gem_free_object_unlocked) {
868 		kref_put(&obj->refcount, drm_gem_object_free);
869 	} else {
870 		might_lock(&dev->struct_mutex);
871 		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
872 				&dev->struct_mutex))
873 			mutex_unlock(&dev->struct_mutex);
874 	}
875 }
876 EXPORT_SYMBOL(drm_gem_object_put_unlocked);
877 
878 /**
879  * drm_gem_object_put - release a GEM buffer object reference
880  * @obj: GEM buffer object
881  *
882  * This releases a reference to @obj. Callers must hold the
883  * &drm_device.struct_mutex lock when calling this function, even when the
884  * driver doesn't use &drm_device.struct_mutex for anything.
885  *
886  * For drivers not encumbered with legacy locking use
887  * drm_gem_object_put_unlocked() instead.
888  */
889 void
890 drm_gem_object_put(struct drm_gem_object *obj)
891 {
892 	if (obj) {
893 		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
894 
895 		kref_put(&obj->refcount, drm_gem_object_free);
896 	}
897 }
898 EXPORT_SYMBOL(drm_gem_object_put);
899 
900 /**
901  * drm_gem_vm_open - vma->ops->open implementation for GEM
902  * @vma: VM area structure
903  *
904  * This function implements the #vm_operations_struct open() callback for GEM
905  * drivers. This must be used together with drm_gem_vm_close().
906  */
907 void drm_gem_vm_open(struct vm_area_struct *vma)
908 {
909 	struct drm_gem_object *obj = vma->vm_private_data;
910 
911 	drm_gem_object_get(obj);
912 }
913 EXPORT_SYMBOL(drm_gem_vm_open);
914 
915 /**
916  * drm_gem_vm_close - vma->ops->close implementation for GEM
917  * @vma: VM area structure
918  *
919  * This function implements the #vm_operations_struct close() callback for GEM
920  * drivers. This must be used together with drm_gem_vm_open().
921  */
922 void drm_gem_vm_close(struct vm_area_struct *vma)
923 {
924 	struct drm_gem_object *obj = vma->vm_private_data;
925 
926 	drm_gem_object_put_unlocked(obj);
927 }
928 EXPORT_SYMBOL(drm_gem_vm_close);
929 
930 /**
931  * drm_gem_mmap_obj - memory map a GEM object
932  * @obj: the GEM object to map
933  * @obj_size: the object size to be mapped, in bytes
934  * @vma: VMA for the area to be mapped
935  *
936  * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
937  * provided by the driver. Depending on their requirements, drivers can either
938  * provide a fault handler in their gem_vm_ops (in which case any accesses to
939  * the object will be trapped, to perform migration, GTT binding, surface
940  * register allocation, or performance monitoring), or mmap the buffer memory
941  * synchronously after calling drm_gem_mmap_obj.
942  *
943  * This function is mainly intended to implement the DMABUF mmap operation, when
944  * the GEM object is not looked up based on its fake offset. To implement the
945  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
946  *
947  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
948  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
949  * callers must verify access restrictions before calling this helper.
950  *
951  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
952  * size, or if no gem_vm_ops are provided.
953  */
954 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
955 		     struct vm_area_struct *vma)
956 {
957 	struct drm_device *dev = obj->dev;
958 
959 	/* Check for valid size. */
960 	if (obj_size < vma->vm_end - vma->vm_start)
961 		return -EINVAL;
962 
963 	if (!dev->driver->gem_vm_ops)
964 		return -EINVAL;
965 
966 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
967 	vma->vm_ops = dev->driver->gem_vm_ops;
968 	vma->vm_private_data = obj;
969 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
970 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
971 
972 	/* Take a ref for this mapping of the object, so that the fault
973 	 * handler can dereference the mmap offset's pointer to the object.
974 	 * This reference is cleaned up by the corresponding vm_close
975 	 * (which should happen whether the vma was created by this call, or
976 	 * by a vm_open due to mremap or partial unmap or whatever).
977 	 */
978 	drm_gem_object_get(obj);
979 
980 	return 0;
981 }
982 EXPORT_SYMBOL(drm_gem_mmap_obj);
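
/*
 * Example: a hypothetical &drm_driver.gem_prime_mmap implementation for the
 * dma-buf path mentioned above, where the importer's access is already
 * trusted and no fake-offset lookup takes place (dma-buf mmap offsets start
 * at zero).
 *
 *	static int foo_gem_prime_mmap(struct drm_gem_object *obj,
 *				      struct vm_area_struct *vma)
 *	{
 *		return drm_gem_mmap_obj(obj, obj->size, vma);
 *	}
 */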
983 
984 /**
985  * drm_gem_mmap - memory map routine for GEM objects
986  * @filp: DRM file pointer
987  * @vma: VMA for the area to be mapped
988  *
989  * If a driver supports GEM object mapping, mmap calls on the DRM file
990  * descriptor will end up here.
991  *
992  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
993  * contain the fake offset we created when the GTT map ioctl was called on
994  * the object) and map it with a call to drm_gem_mmap_obj().
995  *
996  * If the caller is not granted access to the buffer object, the mmap will fail
997  * with EACCES. Please see the vma manager for more information.
998  */
999 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1000 {
1001 	struct drm_file *priv = filp->private_data;
1002 	struct drm_device *dev = priv->minor->dev;
1003 	struct drm_gem_object *obj = NULL;
1004 	struct drm_vma_offset_node *node;
1005 	int ret;
1006 
1007 	if (drm_dev_is_unplugged(dev))
1008 		return -ENODEV;
1009 
1010 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1011 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1012 						  vma->vm_pgoff,
1013 						  vma_pages(vma));
1014 	if (likely(node)) {
1015 		obj = container_of(node, struct drm_gem_object, vma_node);
1016 		/*
1017 		 * When the object is being freed, after it hits 0-refcnt it
1018 		 * proceeds to tear down the object. In the process it will
1019 		 * attempt to remove the VMA offset and so acquire this
1020 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1021 		 * that matches our range, we know it is in the process of being
1022 		 * destroyed and will be freed as soon as we release the lock -
1023 		 * so we have to check for the 0-refcnted object and treat it as
1024 		 * invalid.
1025 		 */
1026 		if (!kref_get_unless_zero(&obj->refcount))
1027 			obj = NULL;
1028 	}
1029 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1030 
1031 	if (!obj)
1032 		return -EINVAL;
1033 
1034 	if (!drm_vma_node_is_allowed(node, priv)) {
1035 		drm_gem_object_put_unlocked(obj);
1036 		return -EACCES;
1037 	}
1038 
1039 	if (node->readonly) {
1040 		if (vma->vm_flags & VM_WRITE) {
1041 			drm_gem_object_put_unlocked(obj);
1042 			return -EINVAL;
1043 		}
1044 
1045 		vma->vm_flags &= ~VM_MAYWRITE;
1046 	}
1047 
1048 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1049 			       vma);
1050 
1051 	drm_gem_object_put_unlocked(obj);
1052 
1053 	return ret;
1054 }
1055 EXPORT_SYMBOL(drm_gem_mmap);
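
/*
 * Example: the pieces a hypothetical driver wires together so that mmap(2)
 * on the DRM fd lands in drm_gem_mmap() and page faults reach its own
 * handler. foo_gem_fault is an assumed driver fault handler.
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open  = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 *
 *	static struct drm_driver foo_driver = {
 *		// ...
 *		.fops		= &foo_fops,
 *		.gem_vm_ops	= &foo_gem_vm_ops,
 *	};
 */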
1056 
1057 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1058 			const struct drm_gem_object *obj)
1059 {
1060 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1061 	drm_printf_indent(p, indent, "refcount=%u\n",
1062 			  kref_read(&obj->refcount));
1063 	drm_printf_indent(p, indent, "start=%08lx\n",
1064 			  drm_vma_node_start(&obj->vma_node));
1065 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1066 	drm_printf_indent(p, indent, "imported=%s\n",
1067 			  obj->import_attach ? "yes" : "no");
1068 
1069 	if (obj->dev->driver->gem_print_info)
1070 		obj->dev->driver->gem_print_info(p, indent, obj);
1071 }
1072