xref: /openbmc/linux/drivers/gpu/drm/drm_gem.c (revision ccb01374)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27 
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/mm.h>
31 #include <linux/uaccess.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/module.h>
35 #include <linux/mman.h>
36 #include <linux/pagemap.h>
37 #include <linux/shmem_fs.h>
38 #include <linux/dma-buf.h>
39 #include <linux/mem_encrypt.h>
40 #include <drm/drmP.h>
41 #include <drm/drm_vma_manager.h>
42 #include <drm/drm_gem.h>
43 #include <drm/drm_print.h>
44 #include "drm_internal.h"
45 
46 /** @file drm_gem.c
47  *
48  * This file provides some of the base ioctls and library routines for
49  * the graphics memory manager implemented by each device driver.
50  *
51  * Because various devices have different requirements in terms of
52  * synchronization and migration strategies, implementing that is left up to
53  * the driver, and all that the general API provides should be generic --
54  * allocating objects, reading/writing data with the CPU, freeing objects.
55  * Even there, platform-dependent optimizations for reading/writing data with
56  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
57  * the DRI2 implementation wants to have at least allocate/mmap be generic.
58  *
59  * The goal was to have swap-backed object allocation managed through
60  * struct file.  However, file descriptors as handles to a struct file have
61  * two major failings:
62  * - Process limits prevent more than 1024 or so being used at a time by
63  *   default.
64  * - Inability to allocate high fds will aggravate the X Server's select()
65  *   handling, and likely that of many GL client applications as well.
66  *
67  * This led to a plan of using our own integer IDs (called handles, following
68  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
69  * ioctls.  The objects themselves will still include the struct file so
70  * that we can transition to fds if the required kernel infrastructure shows
71  * up at a later date, and as our interface with shmfs for memory allocation.
72  */
73 
74 /*
75  * We make up offsets for buffer objects so we can recognize them at
76  * mmap time.
77  */
78 
79 /* pgoff in mmap is an unsigned long, so we need to make sure that
80  * the faked-up offset will fit
81  */
82 
83 #if BITS_PER_LONG == 64
84 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
85 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
86 #else
87 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
88 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
89 #endif
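/*
 * A worked example of the 64-bit values above, assuming PAGE_SHIFT == 12
 * (4 KiB pages): DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 =
 * 0x100000 pages, i.e. the fake offsets start right above the 4 GiB byte
 * boundary, safely clear of anything a 32-bit pgoff could produce; and
 * DRM_FILE_PAGE_OFFSET_SIZE is 0xFFFFF * 16 = 0xFFFFF0 pages, roughly a
 * 64 GiB window of fake offsets to hand out.
 */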
90 
91 /**
92  * drm_gem_init - Initialize the GEM device fields
93  * @dev: drm_device structure to initialize
94  */
95 int
96 drm_gem_init(struct drm_device *dev)
97 {
98 	struct drm_vma_offset_manager *vma_offset_manager;
99 
100 	mutex_init(&dev->object_name_lock);
101 	idr_init_base(&dev->object_name_idr, 1);
102 
103 	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
104 	if (!vma_offset_manager) {
105 		DRM_ERROR("out of memory\n");
106 		return -ENOMEM;
107 	}
108 
109 	dev->vma_offset_manager = vma_offset_manager;
110 	drm_vma_offset_manager_init(vma_offset_manager,
111 				    DRM_FILE_PAGE_OFFSET_START,
112 				    DRM_FILE_PAGE_OFFSET_SIZE);
113 
114 	return 0;
115 }
116 
117 void
118 drm_gem_destroy(struct drm_device *dev)
119 {
120 
121 	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
122 	kfree(dev->vma_offset_manager);
123 	dev->vma_offset_manager = NULL;
124 }
125 
126 /**
127  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
128  * @dev: drm_device the object should be initialized for
129  * @obj: drm_gem_object to initialize
130  * @size: object size
131  *
132  * Initialize an already allocated GEM object of the specified size with
133  * shmfs backing store.
134  */
135 int drm_gem_object_init(struct drm_device *dev,
136 			struct drm_gem_object *obj, size_t size)
137 {
138 	struct file *filp;
139 
140 	drm_gem_private_object_init(dev, obj, size);
141 
142 	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
143 	if (IS_ERR(filp))
144 		return PTR_ERR(filp);
145 
146 	obj->filp = filp;
147 
148 	return 0;
149 }
150 EXPORT_SYMBOL(drm_gem_object_init);
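/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * drivers typically embed &struct drm_gem_object in their own buffer
 * structure and initialize it with a page-aligned size.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		struct page **pages;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */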
151 
152 /**
153  * drm_gem_private_object_init - initialize an allocated private GEM object
154  * @dev: drm_device the object should be initialized for
155  * @obj: drm_gem_object to initialize
156  * @size: object size
157  *
158  * Initialize an already allocated GEM object of the specified size with
159  * no GEM provided backing store. Instead the caller is responsible for
160  * backing the object and handling it.
161  */
162 void drm_gem_private_object_init(struct drm_device *dev,
163 				 struct drm_gem_object *obj, size_t size)
164 {
165 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
166 
167 	obj->dev = dev;
168 	obj->filp = NULL;
169 
170 	kref_init(&obj->refcount);
171 	obj->handle_count = 0;
172 	obj->size = size;
173 	drm_vma_node_reset(&obj->vma_node);
174 }
175 EXPORT_SYMBOL(drm_gem_private_object_init);
176 
177 static void
178 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
179 {
180 	/*
181 	 * Note: obj->dma_buf can't disappear as long as we still hold a
182 	 * handle reference in obj->handle_count.
183 	 */
184 	mutex_lock(&filp->prime.lock);
185 	if (obj->dma_buf) {
186 		drm_prime_remove_buf_handle_locked(&filp->prime,
187 						   obj->dma_buf);
188 	}
189 	mutex_unlock(&filp->prime.lock);
190 }
191 
192 /**
193  * drm_gem_object_handle_free - release resources bound to userspace handles
194  * @obj: GEM object to clean up.
195  *
196  * Called after the last handle to the object has been closed
197  *
198  * Removes any name for the object. Note that this must be
199  * called before drm_gem_object_free or we'll be touching
200  * freed memory
201  */
202 static void drm_gem_object_handle_free(struct drm_gem_object *obj)
203 {
204 	struct drm_device *dev = obj->dev;
205 
206 	/* Remove any name for this object */
207 	if (obj->name) {
208 		idr_remove(&dev->object_name_idr, obj->name);
209 		obj->name = 0;
210 	}
211 }
212 
213 static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
214 {
215 	/* Unbreak the reference cycle if we have an exported dma_buf. */
216 	if (obj->dma_buf) {
217 		dma_buf_put(obj->dma_buf);
218 		obj->dma_buf = NULL;
219 	}
220 }
221 
222 static void
223 drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
224 {
225 	struct drm_device *dev = obj->dev;
226 	bool final = false;
227 
228 	if (WARN_ON(obj->handle_count == 0))
229 		return;
230 
231 	/*
232 	 * Must bump handle count first as this may be the last
233 	 * ref, in which case the object would disappear before we
234 	 * checked for a name.
235 	 */
236 
237 	mutex_lock(&dev->object_name_lock);
238 	if (--obj->handle_count == 0) {
239 		drm_gem_object_handle_free(obj);
240 		drm_gem_object_exported_dma_buf_free(obj);
241 		final = true;
242 	}
243 	mutex_unlock(&dev->object_name_lock);
244 
245 	if (final)
246 		drm_gem_object_put_unlocked(obj);
247 }
248 
249 /*
250  * Called at device or object close to release the file's
251  * handle references on objects.
252  */
253 static int
254 drm_gem_object_release_handle(int id, void *ptr, void *data)
255 {
256 	struct drm_file *file_priv = data;
257 	struct drm_gem_object *obj = ptr;
258 	struct drm_device *dev = obj->dev;
259 
260 	if (obj->funcs && obj->funcs->close)
261 		obj->funcs->close(obj, file_priv);
262 	else if (dev->driver->gem_close_object)
263 		dev->driver->gem_close_object(obj, file_priv);
264 
265 	if (drm_core_check_feature(dev, DRIVER_PRIME))
266 		drm_gem_remove_prime_handles(obj, file_priv);
267 	drm_vma_node_revoke(&obj->vma_node, file_priv);
268 
269 	drm_gem_object_handle_put_unlocked(obj);
270 
271 	return 0;
272 }
273 
274 /**
275  * drm_gem_handle_delete - deletes the given file-private handle
276  * @filp: drm file-private structure to use for the handle look up
277  * @handle: userspace handle to delete
278  *
279  * Removes the GEM handle from the @filp lookup table which has been added with
280  * drm_gem_handle_create(). If this is the last handle also cleans up linked
281  * resources like GEM names.
282  */
283 int
284 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
285 {
286 	struct drm_gem_object *obj;
287 
288 	spin_lock(&filp->table_lock);
289 
290 	/* Check if we currently have a reference on the object */
291 	obj = idr_replace(&filp->object_idr, NULL, handle);
292 	spin_unlock(&filp->table_lock);
293 	if (IS_ERR_OR_NULL(obj))
294 		return -EINVAL;
295 
296 	/* Release driver's reference and decrement refcount. */
297 	drm_gem_object_release_handle(handle, obj, filp);
298 
299 	/* And finally make the handle available for future allocations. */
300 	spin_lock(&filp->table_lock);
301 	idr_remove(&filp->object_idr, handle);
302 	spin_unlock(&filp->table_lock);
303 
304 	return 0;
305 }
306 EXPORT_SYMBOL(drm_gem_handle_delete);
307 
308 /**
309  * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
310  * @file: drm file-private structure containing the gem object
311  * @dev: corresponding drm_device
312  * @handle: gem object handle
313  * @offset: return location for the fake mmap offset
314  *
315  * This implements the &drm_driver.dumb_map_offset kms driver callback for
316  * drivers which use gem to manage their backing storage.
317  *
318  * Returns:
319  * 0 on success or a negative error code on failure.
320  */
321 int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
322 			    u32 handle, u64 *offset)
323 {
324 	struct drm_gem_object *obj;
325 	int ret;
326 
327 	obj = drm_gem_object_lookup(file, handle);
328 	if (!obj)
329 		return -ENOENT;
330 
331 	/* Don't allow imported objects to be mapped */
332 	if (obj->import_attach) {
333 		ret = -EINVAL;
334 		goto out;
335 	}
336 
337 	ret = drm_gem_create_mmap_offset(obj);
338 	if (ret)
339 		goto out;
340 
341 	*offset = drm_vma_node_offset_addr(&obj->vma_node);
342 out:
343 	drm_gem_object_put_unlocked(obj);
344 
345 	return ret;
346 }
347 EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
348 
349 /**
350  * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
351  * @file: drm file-private structure to remove the dumb handle from
352  * @dev: corresponding drm_device
353  * @handle: the dumb handle to remove
354  *
355  * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
356  * which use gem to manage their backing storage.
357  */
358 int drm_gem_dumb_destroy(struct drm_file *file,
359 			 struct drm_device *dev,
360 			 uint32_t handle)
361 {
362 	return drm_gem_handle_delete(file, handle);
363 }
364 EXPORT_SYMBOL(drm_gem_dumb_destroy);
365 
366 /**
367  * drm_gem_handle_create_tail - internal function to create a handle
368  * @file_priv: drm file-private structure to register the handle for
369  * @obj: object to register
370  * @handlep: pointer to return the created handle to the caller
371  *
372  * This expects the &drm_device.object_name_lock to be held already and will
373  * drop it before returning. Used to avoid races in establishing new handles
374  * when importing an object from either a flink name or a dma-buf.
375  *
376  * Handles must be released again through drm_gem_handle_delete(). This is done
377  * when userspace closes @file_priv for all attached handles, or through the
378  * GEM_CLOSE ioctl for individual handles.
379  */
380 int
381 drm_gem_handle_create_tail(struct drm_file *file_priv,
382 			   struct drm_gem_object *obj,
383 			   u32 *handlep)
384 {
385 	struct drm_device *dev = obj->dev;
386 	u32 handle;
387 	int ret;
388 
389 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
390 	if (obj->handle_count++ == 0)
391 		drm_gem_object_get(obj);
392 
393 	/*
394 	 * Get the user-visible handle using idr.  Preload and perform
395 	 * allocation under our spinlock.
396 	 */
397 	idr_preload(GFP_KERNEL);
398 	spin_lock(&file_priv->table_lock);
399 
400 	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
401 
402 	spin_unlock(&file_priv->table_lock);
403 	idr_preload_end();
404 
405 	mutex_unlock(&dev->object_name_lock);
406 	if (ret < 0)
407 		goto err_unref;
408 
409 	handle = ret;
410 
411 	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
412 	if (ret)
413 		goto err_remove;
414 
415 	if (obj->funcs && obj->funcs->open) {
416 		ret = obj->funcs->open(obj, file_priv);
417 		if (ret)
418 			goto err_revoke;
419 	} else if (dev->driver->gem_open_object) {
420 		ret = dev->driver->gem_open_object(obj, file_priv);
421 		if (ret)
422 			goto err_revoke;
423 	}
424 
425 	*handlep = handle;
426 	return 0;
427 
428 err_revoke:
429 	drm_vma_node_revoke(&obj->vma_node, file_priv);
430 err_remove:
431 	spin_lock(&file_priv->table_lock);
432 	idr_remove(&file_priv->object_idr, handle);
433 	spin_unlock(&file_priv->table_lock);
434 err_unref:
435 	drm_gem_object_handle_put_unlocked(obj);
436 	return ret;
437 }
438 
439 /**
440  * drm_gem_handle_create - create a gem handle for an object
441  * @file_priv: drm file-private structure to register the handle for
442  * @obj: object to register
443  * @handlep: pointer to return the created handle to the caller
444  *
445  * Create a handle for this object. This adds a handle reference to the object,
446  * which includes a regular reference count. Callers will likely want to
447  * dereference the object afterwards.
448  *
449  * Since this publishes @obj to userspace it must be fully set up by this point;
450  * drivers must call this last in their buffer object creation callbacks.
451  */
452 int drm_gem_handle_create(struct drm_file *file_priv,
453 			  struct drm_gem_object *obj,
454 			  u32 *handlep)
455 {
456 	mutex_lock(&obj->dev->object_name_lock);
457 
458 	return drm_gem_handle_create_tail(file_priv, obj, handlep);
459 }
460 EXPORT_SYMBOL(drm_gem_handle_create);
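/*
 * A usage sketch building on the hypothetical foo_bo_create() above: because
 * the handle publishes the object to userspace, a create ioctl makes the
 * handle last and then drops its own reference, leaving the handle as the
 * sole reference.
 *
 *	static int foo_gem_create(struct drm_device *dev, struct drm_file *file,
 *				  size_t size, u32 *handle)
 *	{
 *		struct foo_bo *bo = foo_bo_create(dev, size);
 *		int ret;
 *
 *		if (IS_ERR(bo))
 *			return PTR_ERR(bo);
 *
 *		ret = drm_gem_handle_create(file, &bo->base, handle);
 *		drm_gem_object_put_unlocked(&bo->base);
 *		return ret;
 *	}
 */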
461 
462 
463 /**
464  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
465  * @obj: obj in question
466  *
467  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
468  *
469  * Note that drm_gem_object_release() already calls this function, so drivers
470  * don't have to take care of releasing the mmap offset themselves when freeing
471  * the GEM object.
472  */
473 void
474 drm_gem_free_mmap_offset(struct drm_gem_object *obj)
475 {
476 	struct drm_device *dev = obj->dev;
477 
478 	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
479 }
480 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
481 
482 /**
483  * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
484  * @obj: obj in question
485  * @size: the virtual size
486  *
487  * GEM memory mapping works by handing back to userspace a fake mmap offset
488  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
489  * up the object based on the offset and sets up the various memory mapping
490  * structures.
491  *
492  * This routine allocates and attaches a fake offset for @obj, in cases where
493  * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
494  * Otherwise just use drm_gem_create_mmap_offset().
495  *
496  * This function is idempotent and handles an already allocated mmap offset
497  * transparently. Drivers do not need to check for this case.
498  */
499 int
500 drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
501 {
502 	struct drm_device *dev = obj->dev;
503 
504 	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
505 				  size / PAGE_SIZE);
506 }
507 EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
508 
509 /**
510  * drm_gem_create_mmap_offset - create a fake mmap offset for an object
511  * @obj: obj in question
512  *
513  * GEM memory mapping works by handing back to userspace a fake mmap offset
514  * it can use in a subsequent mmap(2) call.  The DRM core code then looks
515  * up the object based on the offset and sets up the various memory mapping
516  * structures.
517  *
518  * This routine allocates and attaches a fake offset for @obj.
519  *
520  * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
521  * the fake offset again.
522  */
523 int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
524 {
525 	return drm_gem_create_mmap_offset_size(obj, obj->size);
526 }
527 EXPORT_SYMBOL(drm_gem_create_mmap_offset);
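/*
 * For illustration, the userspace side of the scheme: the fake offset
 * (drm_vma_node_offset_addr(&obj->vma_node)) is returned to userspace, which
 * passes it straight back as the offset argument of mmap(2) on the DRM fd:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, fake_offset);
 */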
528 
529 /**
530  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
531  * from shmem
532  * @obj: obj in question
533  *
534  * This reads the page-array of the shmem-backing storage of the given gem
535  * object. An array of pages is returned. If a page is not allocated or
536  * swapped-out, this will allocate/swap-in the required pages. Note that the
537  * whole object is covered by the page-array and pinned in memory.
538  *
539  * Use drm_gem_put_pages() to release the array and unpin all pages.
540  *
541  * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
542  * If you require other GFP-masks, you have to do those allocations yourself.
543  *
544  * Note that you are not allowed to change gfp-zones during runtime. That is,
545  * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
546  * set during initialization. If you have special zone constraints, set them
547  * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
548  * to keep pages in the required zone during swap-in.
549  */
550 struct page **drm_gem_get_pages(struct drm_gem_object *obj)
551 {
552 	struct address_space *mapping;
553 	struct page *p, **pages;
554 	int i, npages;
555 
556 	/* This is the shared memory object that backs the GEM resource */
557 	mapping = obj->filp->f_mapping;
558 
559 	/* We already BUG_ON() for non-page-aligned sizes in
560 	 * drm_gem_object_init(), so we should never hit this unless
561 	 * driver author is doing something really wrong:
562 	 * the driver author is doing something really wrong:
563 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
564 
565 	npages = obj->size >> PAGE_SHIFT;
566 
567 	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
568 	if (pages == NULL)
569 		return ERR_PTR(-ENOMEM);
570 
571 	for (i = 0; i < npages; i++) {
572 		p = shmem_read_mapping_page(mapping, i);
573 		if (IS_ERR(p))
574 			goto fail;
575 		pages[i] = p;
576 
577 		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
578 		 * correct region during swapin. Note that this requires
579 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
580 		 * so shmem can relocate pages during swapin if required.
581 		 */
582 		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
583 				(page_to_pfn(p) >= 0x00100000UL));
584 	}
585 
586 	return pages;
587 
588 fail:
589 	while (i--)
590 		put_page(pages[i]);
591 
592 	kvfree(pages);
593 	return ERR_CAST(p);
594 }
595 EXPORT_SYMBOL(drm_gem_get_pages);
596 
597 /**
598  * drm_gem_put_pages - helper to free backing pages for a GEM object
599  * @obj: obj in question
600  * @pages: pages to free
601  * @dirty: if true, pages will be marked as dirty
602  * @accessed: if true, the pages will be marked as accessed
603  */
604 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
605 		bool dirty, bool accessed)
606 {
607 	int i, npages;
608 
609 	/* We already BUG_ON() for non-page-aligned sizes in
610 	 * drm_gem_object_init(), so we should never hit this unless
611 	 * driver author is doing something really wrong:
612 	 * the driver author is doing something really wrong:
613 	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
614 
615 	npages = obj->size >> PAGE_SHIFT;
616 
617 	for (i = 0; i < npages; i++) {
618 		if (dirty)
619 			set_page_dirty(pages[i]);
620 
621 		if (accessed)
622 			mark_page_accessed(pages[i]);
623 
624 		/* Undo the reference we took when populating the table */
625 		put_page(pages[i]);
626 	}
627 
628 	kvfree(pages);
629 }
630 EXPORT_SYMBOL(drm_gem_put_pages);
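/*
 * A pairing sketch (hypothetical foo_bo from above, error paths trimmed):
 * drm_gem_get_pages() and drm_gem_put_pages() bracket the period during which
 * the backing pages must stay pinned, e.g. while mapped for DMA.
 *
 *	static int foo_pin_pages(struct foo_bo *bo)
 *	{
 *		struct page **pages = drm_gem_get_pages(&bo->base);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_unpin_pages(struct foo_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *		bo->pages = NULL;
 *	}
 */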
631 
632 /**
633  * drm_gem_object_lookup - look up a GEM object from its handle
634  * @filp: DRM file private data
635  * @handle: userspace handle
636  *
637  * Returns:
638  *
639  * A reference to the object named by the handle if such exists on @filp, NULL
640  * otherwise.
641  */
642 struct drm_gem_object *
643 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
644 {
645 	struct drm_gem_object *obj;
646 
647 	spin_lock(&filp->table_lock);
648 
649 	/* Check if we currently have a reference on the object */
650 	obj = idr_find(&filp->object_idr, handle);
651 	if (obj)
652 		drm_gem_object_get(obj);
653 
654 	spin_unlock(&filp->table_lock);
655 
656 	return obj;
657 }
658 EXPORT_SYMBOL(drm_gem_object_lookup);
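/*
 * A lookup sketch (hypothetical ioctl, foo_query_args invented for the
 * example): every successful lookup takes a reference that must be balanced
 * with a put once the caller is done with the object.
 *
 *	static int foo_query_ioctl(struct drm_device *dev, void *data,
 *				   struct drm_file *file)
 *	{
 *		struct foo_query_args *args = data;
 *		struct drm_gem_object *obj;
 *
 *		obj = drm_gem_object_lookup(file, args->handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		args->size = obj->size;
 *		drm_gem_object_put_unlocked(obj);
 *		return 0;
 *	}
 */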
659 
660 /**
661  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
662  * @dev: drm_device
663  * @data: ioctl data
664  * @file_priv: drm file-private structure
665  *
666  * Releases the handle to an mm object.
667  */
668 int
669 drm_gem_close_ioctl(struct drm_device *dev, void *data,
670 		    struct drm_file *file_priv)
671 {
672 	struct drm_gem_close *args = data;
673 	int ret;
674 
675 	if (!drm_core_check_feature(dev, DRIVER_GEM))
676 		return -EOPNOTSUPP;
677 
678 	ret = drm_gem_handle_delete(file_priv, args->handle);
679 
680 	return ret;
681 }
682 
683 /**
684  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
685  * @dev: drm_device
686  * @data: ioctl data
687  * @file_priv: drm file-private structure
688  *
689  * Create a global name for an object, returning the name.
690  *
691  * Note that the name does not hold a reference; when the object
692  * is freed, the name goes away.
693  */
694 int
695 drm_gem_flink_ioctl(struct drm_device *dev, void *data,
696 		    struct drm_file *file_priv)
697 {
698 	struct drm_gem_flink *args = data;
699 	struct drm_gem_object *obj;
700 	int ret;
701 
702 	if (!drm_core_check_feature(dev, DRIVER_GEM))
703 		return -EOPNOTSUPP;
704 
705 	obj = drm_gem_object_lookup(file_priv, args->handle);
706 	if (obj == NULL)
707 		return -ENOENT;
708 
709 	mutex_lock(&dev->object_name_lock);
710 	/* prevent races with concurrent gem_close. */
711 	if (obj->handle_count == 0) {
712 		ret = -ENOENT;
713 		goto err;
714 	}
715 
716 	if (!obj->name) {
717 		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
718 		if (ret < 0)
719 			goto err;
720 
721 		obj->name = ret;
722 	}
723 
724 	args->name = (uint64_t) obj->name;
725 	ret = 0;
726 
727 err:
728 	mutex_unlock(&dev->object_name_lock);
729 	drm_gem_object_put_unlocked(obj);
730 	return ret;
731 }
732 
733 /**
734  * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
735  * @dev: drm_device
736  * @data: ioctl data
737  * @file_priv: drm file-private structure
738  *
739  * Open an object using the global name, returning a handle and the size.
740  *
741  * This handle (of course) holds a reference to the object, so the object
742  * will not go away until the handle is deleted.
743  */
744 int
745 drm_gem_open_ioctl(struct drm_device *dev, void *data,
746 		   struct drm_file *file_priv)
747 {
748 	struct drm_gem_open *args = data;
749 	struct drm_gem_object *obj;
750 	int ret;
751 	u32 handle;
752 
753 	if (!drm_core_check_feature(dev, DRIVER_GEM))
754 		return -EOPNOTSUPP;
755 
756 	mutex_lock(&dev->object_name_lock);
757 	obj = idr_find(&dev->object_name_idr, (int) args->name);
758 	if (obj) {
759 		drm_gem_object_get(obj);
760 	} else {
761 		mutex_unlock(&dev->object_name_lock);
762 		return -ENOENT;
763 	}
764 
765 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
766 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
767 	drm_gem_object_put_unlocked(obj);
768 	if (ret)
769 		return ret;
770 
771 	args->handle = handle;
772 	args->size = obj->size;
773 
774 	return 0;
775 }
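/*
 * An illustrative userspace round trip for the two ioctls above (error
 * handling omitted): process A names a buffer with flink, process B opens it
 * by that global name, after which open_arg.handle and open_arg.size are
 * valid in process B.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *
 *	struct drm_gem_open open_arg = { .name = flink.name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg);
 */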
776 
777 /**
778  * drm_gem_open - initializes GEM file-private structures at devnode open time
779  * @dev: drm_device which is being opened by userspace
780  * @file_private: drm file-private structure to set up
781  *
782  * Called at device open time, sets up the structure for handling refcounting
783  * of mm objects.
784  */
785 void
786 drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
787 {
788 	idr_init_base(&file_private->object_idr, 1);
789 	spin_lock_init(&file_private->table_lock);
790 }
791 
792 /**
793  * drm_gem_release - release file-private GEM resources
794  * @dev: drm_device which is being closed by userspace
795  * @file_private: drm file-private structure to clean up
796  *
797  * Called at close time when the filp is going away.
798  *
799  * Releases any remaining references on objects by this filp.
800  */
801 void
802 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
803 {
804 	idr_for_each(&file_private->object_idr,
805 		     &drm_gem_object_release_handle, file_private);
806 	idr_destroy(&file_private->object_idr);
807 }
808 
809 /**
810  * drm_gem_object_release - release GEM buffer object resources
811  * @obj: GEM buffer object
812  *
813  * This releases any structures and resources used by @obj and is the inverse of
814  * drm_gem_object_init().
815  */
816 void
817 drm_gem_object_release(struct drm_gem_object *obj)
818 {
819 	WARN_ON(obj->dma_buf);
820 
821 	if (obj->filp)
822 		fput(obj->filp);
823 
824 	drm_gem_free_mmap_offset(obj);
825 }
826 EXPORT_SYMBOL(drm_gem_object_release);
827 
828 /**
829  * drm_gem_object_free - free a GEM object
830  * @kref: kref of the object to free
831  *
832  * Called after the last reference to the object has been lost.
833  * Must be called holding &drm_device.struct_mutex when the driver still uses
834  * the legacy &drm_driver.gem_free_object callback; the other free paths do
835  * not need that lock. Frees the object.
836  */
837 void
838 drm_gem_object_free(struct kref *kref)
839 {
840 	struct drm_gem_object *obj =
841 		container_of(kref, struct drm_gem_object, refcount);
842 	struct drm_device *dev = obj->dev;
843 
844 	if (obj->funcs) {
845 		obj->funcs->free(obj);
846 	} else if (dev->driver->gem_free_object_unlocked) {
847 		dev->driver->gem_free_object_unlocked(obj);
848 	} else if (dev->driver->gem_free_object) {
849 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
850 
851 		dev->driver->gem_free_object(obj);
852 	}
853 }
854 EXPORT_SYMBOL(drm_gem_object_free);
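/*
 * A teardown sketch (hypothetical foo_bo from above): a driver's free
 * callback releases driver-private state first, then the common GEM
 * resources via drm_gem_object_release(), and finally the memory itself.
 *
 *	static void foo_gem_free_object_unlocked(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */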
855 
856 /**
857  * drm_gem_object_put_unlocked - drop a GEM buffer object reference
858  * @obj: GEM buffer object
859  *
860  * This releases a reference to @obj. Callers must not hold the
861  * &drm_device.struct_mutex lock when calling this function.
862  *
863  * See also __drm_gem_object_put().
864  */
865 void
866 drm_gem_object_put_unlocked(struct drm_gem_object *obj)
867 {
868 	struct drm_device *dev;
869 
870 	if (!obj)
871 		return;
872 
873 	dev = obj->dev;
874 
875 	if (dev->driver->gem_free_object) {
876 		might_lock(&dev->struct_mutex);
877 		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
878 				&dev->struct_mutex))
879 			mutex_unlock(&dev->struct_mutex);
880 	} else {
881 		kref_put(&obj->refcount, drm_gem_object_free);
882 	}
883 }
884 EXPORT_SYMBOL(drm_gem_object_put_unlocked);
885 
886 /**
887  * drm_gem_object_put - release a GEM buffer object reference
888  * @obj: GEM buffer object
889  *
890  * This releases a reference to @obj. Callers must hold the
891  * &drm_device.struct_mutex lock when calling this function, even when the
892  * driver doesn't use &drm_device.struct_mutex for anything.
893  *
894  * For drivers not encumbered with legacy locking use
895  * drm_gem_object_put_unlocked() instead.
896  */
897 void
898 drm_gem_object_put(struct drm_gem_object *obj)
899 {
900 	if (obj) {
901 		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
902 
903 		kref_put(&obj->refcount, drm_gem_object_free);
904 	}
905 }
906 EXPORT_SYMBOL(drm_gem_object_put);
907 
908 /**
909  * drm_gem_vm_open - vma->ops->open implementation for GEM
910  * @vma: VM area structure
911  *
912  * This function implements the #vm_operations_struct open() callback for GEM
913  * drivers. This must be used together with drm_gem_vm_close().
914  */
915 void drm_gem_vm_open(struct vm_area_struct *vma)
916 {
917 	struct drm_gem_object *obj = vma->vm_private_data;
918 
919 	drm_gem_object_get(obj);
920 }
921 EXPORT_SYMBOL(drm_gem_vm_open);
922 
923 /**
924  * drm_gem_vm_close - vma->ops->close implementation for GEM
925  * @vma: VM area structure
926  *
927  * This function implements the #vm_operations_struct close() callback for GEM
928  * drivers. This must be used together with drm_gem_vm_open().
929  */
930 void drm_gem_vm_close(struct vm_area_struct *vma)
931 {
932 	struct drm_gem_object *obj = vma->vm_private_data;
933 
934 	drm_gem_object_put_unlocked(obj);
935 }
936 EXPORT_SYMBOL(drm_gem_vm_close);
937 
938 /**
939  * drm_gem_mmap_obj - memory map a GEM object
940  * @obj: the GEM object to map
941  * @obj_size: the object size to be mapped, in bytes
942  * @vma: VMA for the area to be mapped
943  *
944  * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
945  * provided by the driver. Depending on their requirements, drivers can either
946  * provide a fault handler in their gem_vm_ops (in which case any accesses to
947  * the object will be trapped, to perform migration, GTT binding, surface
948  * register allocation, or performance monitoring), or mmap the buffer memory
949  * synchronously after calling drm_gem_mmap_obj.
950  *
951  * This function is mainly intended to implement the DMABUF mmap operation, when
952  * the GEM object is not looked up based on its fake offset. To implement the
953  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
954  *
955  * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
956  * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
957  * callers must verify access restrictions before calling this helper.
958  *
959  * Return 0 on success or -EINVAL if the object size is smaller than the VMA
960  * size, or if no gem_vm_ops are provided.
961  */
962 int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
963 		     struct vm_area_struct *vma)
964 {
965 	struct drm_device *dev = obj->dev;
966 
967 	/* Check for valid size. */
968 	if (obj_size < vma->vm_end - vma->vm_start)
969 		return -EINVAL;
970 
971 	if (obj->funcs && obj->funcs->vm_ops)
972 		vma->vm_ops = obj->funcs->vm_ops;
973 	else if (dev->driver->gem_vm_ops)
974 		vma->vm_ops = dev->driver->gem_vm_ops;
975 	else
976 		return -EINVAL;
977 
978 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
979 	vma->vm_private_data = obj;
980 	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
981 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
982 
983 	/* Take a ref for this mapping of the object, so that the fault
984 	 * handler can dereference the mmap offset's pointer to the object.
985 	 * This reference is cleaned up by the corresponding vm_close
986 	 * (which should happen whether the vma was created by this call, or
987 	 * by a vm_open due to mremap or partial unmap or whatever).
988 	 */
989 	drm_gem_object_get(obj);
990 
991 	return 0;
992 }
993 EXPORT_SYMBOL(drm_gem_mmap_obj);
994 
995 /**
996  * drm_gem_mmap - memory map routine for GEM objects
997  * @filp: DRM file pointer
998  * @vma: VMA for the area to be mapped
999  *
1000  * If a driver supports GEM object mapping, mmap calls on the DRM file
1001  * descriptor will end up here.
1002  *
1003  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
1004  * contain the fake offset we created when the GTT map ioctl was called on
1005  * the object) and map it with a call to drm_gem_mmap_obj().
1006  *
1007  * If the caller is not granted access to the buffer object, the mmap will fail
1008  * with EACCES. Please see the vma manager for more information.
1009  */
1010 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1011 {
1012 	struct drm_file *priv = filp->private_data;
1013 	struct drm_device *dev = priv->minor->dev;
1014 	struct drm_gem_object *obj = NULL;
1015 	struct drm_vma_offset_node *node;
1016 	int ret;
1017 
1018 	if (drm_dev_is_unplugged(dev))
1019 		return -ENODEV;
1020 
1021 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1022 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1023 						  vma->vm_pgoff,
1024 						  vma_pages(vma));
1025 	if (likely(node)) {
1026 		obj = container_of(node, struct drm_gem_object, vma_node);
1027 		/*
1028 		 * When the object is being freed, after it hits 0-refcnt it
1029 		 * proceeds to tear down the object. In the process it will
1030 		 * attempt to remove the VMA offset and so acquire this
1031 		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
1032 		 * that matches our range, we know it is in the process of being
1033 		 * destroyed and will be freed as soon as we release the lock -
1034 		 * so we have to check for the 0-refcnted object and treat it as
1035 		 * invalid.
1036 		 */
1037 		if (!kref_get_unless_zero(&obj->refcount))
1038 			obj = NULL;
1039 	}
1040 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1041 
1042 	if (!obj)
1043 		return -EINVAL;
1044 
1045 	if (!drm_vma_node_is_allowed(node, priv)) {
1046 		drm_gem_object_put_unlocked(obj);
1047 		return -EACCES;
1048 	}
1049 
1050 	if (node->readonly) {
1051 		if (vma->vm_flags & VM_WRITE) {
1052 			drm_gem_object_put_unlocked(obj);
1053 			return -EINVAL;
1054 		}
1055 
1056 		vma->vm_flags &= ~VM_MAYWRITE;
1057 	}
1058 
1059 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
1060 			       vma);
1061 
1062 	drm_gem_object_put_unlocked(obj);
1063 
1064 	return ret;
1065 }
1066 EXPORT_SYMBOL(drm_gem_mmap);
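/*
 * A wiring sketch: drivers using the fake-offset scheme point the mmap fop at
 * drm_gem_mmap() (foo_fops is hypothetical; DEFINE_DRM_GEM_FOPS() is the
 * usual shorthand for this set of operations).
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */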
1067 
1068 void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
1069 			const struct drm_gem_object *obj)
1070 {
1071 	drm_printf_indent(p, indent, "name=%d\n", obj->name);
1072 	drm_printf_indent(p, indent, "refcount=%u\n",
1073 			  kref_read(&obj->refcount));
1074 	drm_printf_indent(p, indent, "start=%08lx\n",
1075 			  drm_vma_node_start(&obj->vma_node));
1076 	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
1077 	drm_printf_indent(p, indent, "imported=%s\n",
1078 			  obj->import_attach ? "yes" : "no");
1079 
1080 	if (obj->funcs && obj->funcs->print_info)
1081 		obj->funcs->print_info(p, indent, obj);
1082 	else if (obj->dev->driver->gem_print_info)
1083 		obj->dev->driver->gem_print_info(p, indent, obj);
1084 }
1085 
1086 /**
1087  * drm_gem_pin - Pin backing buffer in memory
1088  * @obj: GEM object
1089  *
1090  * Make sure the backing buffer is pinned in memory.
1091  *
1092  * Returns:
1093  * 0 on success or a negative error code on failure.
1094  */
1095 int drm_gem_pin(struct drm_gem_object *obj)
1096 {
1097 	if (obj->funcs && obj->funcs->pin)
1098 		return obj->funcs->pin(obj);
1099 	else if (obj->dev->driver->gem_prime_pin)
1100 		return obj->dev->driver->gem_prime_pin(obj);
1101 	else
1102 		return 0;
1103 }
1104 EXPORT_SYMBOL(drm_gem_pin);
1105 
1106 /**
1107  * drm_gem_unpin - Unpin backing buffer from memory
1108  * @obj: GEM object
1109  *
1110  * Relax the requirement that the backing buffer is pinned in memory.
1111  */
1112 void drm_gem_unpin(struct drm_gem_object *obj)
1113 {
1114 	if (obj->funcs && obj->funcs->unpin)
1115 		obj->funcs->unpin(obj);
1116 	else if (obj->dev->driver->gem_prime_unpin)
1117 		obj->dev->driver->gem_prime_unpin(obj);
1118 }
1119 EXPORT_SYMBOL(drm_gem_unpin);
1120 
1121 /**
1122  * drm_gem_vmap - Map buffer into kernel virtual address space
1123  * @obj: GEM object
1124  *
1125  * Returns:
1126  * A kernel virtual address for the object's backing storage on success, or an
1127  * ERR_PTR-encoded negative error code on failure.
1128  */
1129 void *drm_gem_vmap(struct drm_gem_object *obj)
1130 {
1131 	void *vaddr;
1132 
1133 	if (obj->funcs && obj->funcs->vmap)
1134 		vaddr = obj->funcs->vmap(obj);
1135 	else if (obj->dev->driver->gem_prime_vmap)
1136 		vaddr = obj->dev->driver->gem_prime_vmap(obj);
1137 	else
1138 		vaddr = ERR_PTR(-EOPNOTSUPP);
1139 
1140 	if (!vaddr)
1141 		vaddr = ERR_PTR(-ENOMEM);
1142 
1143 	return vaddr;
1144 }
1145 EXPORT_SYMBOL(drm_gem_vmap);
1146 
1147 /**
1148  * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
1149  * @obj: GEM object
1150  * @vaddr: Virtual address (can be NULL)
1151  */
1152 void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
1153 {
1154 	if (!vaddr)
1155 		return;
1156 
1157 	if (obj->funcs && obj->funcs->vunmap)
1158 		obj->funcs->vunmap(obj, vaddr);
1159 	else if (obj->dev->driver->gem_prime_vunmap)
1160 		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
1161 }
1162 EXPORT_SYMBOL(drm_gem_vunmap);
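/*
 * A pairing sketch: drm_gem_vmap() and drm_gem_vunmap() bracket CPU access
 * through a contiguous kernel mapping (hypothetical helper, assuming the
 * driver provides a vmap implementation).
 *
 *	static int foo_cpu_clear(struct drm_gem_object *obj)
 *	{
 *		void *vaddr = drm_gem_vmap(obj);
 *
 *		if (IS_ERR(vaddr))
 *			return PTR_ERR(vaddr);
 *
 *		memset(vaddr, 0, obj->size);
 *		drm_gem_vunmap(obj, vaddr);
 *		return 0;
 *	}
 */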
1163