/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
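
/*
 * Example (editor's sketch, not part of the original source): the handle
 * lifecycle as seen from userspace, driven through the generic GEM ioctls
 * implemented at the bottom of this file. Error handling is omitted; fd and
 * fd2 are open DRM device file descriptors, and handle comes from a
 * driver-specific allocation ioctl.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	struct drm_gem_open args;
 *	struct drm_gem_close close;
 *
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);    // publish a global name
 *
 *	args.name = flink.name;
 *	ioctl(fd2, DRM_IOCTL_GEM_OPEN, &args);     // import it on another fd
 *
 *	close.handle = args.handle;
 *	ioctl(fd2, DRM_IOCTL_GEM_CLOSE, &close);   // drop the new handle
 */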

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
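
/*
 * Example (editor's sketch): a typical creation path in a hypothetical "foo"
 * driver. The object struct is allocated by the driver, handed to
 * drm_gem_object_init() with a page-aligned size, and only later published
 * to userspace via drm_gem_handle_create().
 *
 *	struct drm_gem_object *obj;
 *	int ret;
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
 *	if (ret) {
 *		kfree(obj);
 *		return ret;
 *	}
 */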

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
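
/*
 * Example (editor's sketch): private initialization is the usual choice on
 * dma-buf import paths, where the backing store belongs to the exporter. A
 * hypothetical import helper might do:
 *
 *	drm_gem_private_object_init(dev, obj, PAGE_ALIGN(attach->dmabuf->size));
 *	obj->import_attach = attach;
 */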

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
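
/*
 * Example (editor's sketch): the usual publish-and-put pattern at the end of
 * a hypothetical dumb_create callback. The handle holds its own reference,
 * so the creation reference is dropped unconditionally: on failure this
 * frees the object, on success userspace keeps it alive via the handle.
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_put_unlocked(obj);
 *	return ret;
 */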

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
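
/*
 * Example (editor's sketch): userspace passes the fake offset returned by
 * the driver's map-offset ioctl straight to mmap(2) on the DRM fd; the
 * lookup in drm_gem_mmap() below resolves it back to the object.
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, (off_t)fake_offset);
 */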

/*
 * Move pages to the appropriate LRU and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
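
/*
 * Example (editor's sketch): the usual pairing in a hypothetical driver's
 * pin/unpin path. drm_gem_get_pages() returns an ERR_PTR on failure, and
 * every successful call must be balanced with drm_gem_put_pages().
 *
 *	struct page **pages = drm_gem_get_pages(obj);
 *
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... build an sg table, map it for DMA, use the object ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */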

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			     GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
	*objs_out = objs;

out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
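
/*
 * Example (editor's sketch): resolving the BO list of a hypothetical submit
 * ioctl. Each returned object holds a reference; on a lookup failure the
 * array may be only partially populated, so cleanup must put every non-NULL
 * entry (the array is zero-initialized by this helper).
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	// ... on success, use the objects ...
 *	for (i = 0; objs && i < args->bo_count; i++)
 *		if (objs[i])
 *			drm_gem_object_put_unlocked(objs[i]);
 *	kvfree(objs);
 */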

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if one exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
 * GEM object's reservation object.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns 0 on success, -ETIME if the wait timed out, or another negative
 * error code on failure (e.g. -ERESTARTSYS if interrupted, -EINVAL on an
 * unknown handle).
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
				    bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
						  true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
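
/*
 * Example (editor's sketch): a hypothetical driver wait ioctl built on this
 * helper, converting a user-supplied absolute timeout in nanoseconds to
 * jiffies first (drm_timeout_abs_to_jiffies() from the DRM utils is the
 * common helper for that step).
 *
 *	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *	return drm_gem_dma_resv_wait(file_priv, args->handle,
 *				     args->wait_all, timeout);
 */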

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				&dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put_unlocked(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	vma->vm_private_data = obj;

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock.  Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
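
/*
 * Example (editor's sketch): drivers normally wire drm_gem_mmap() up through
 * their &file_operations; the DEFINE_DRM_GEM_FOPS() macro from
 * <drm/drm_gem.h> expands to exactly that wiring.
 *
 *	DEFINE_DRM_GEM_FOPS(foo_fops);
 *
 *	static struct drm_driver foo_driver = {
 *		.fops = &foo_fops,
 *		// ...
 *	};
 */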

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
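
/*
 * Example (editor's sketch): kernel-space CPU mappings are always paired,
 * and the error convention of drm_gem_vmap() above means the result must be
 * checked with IS_ERR(), never compared against NULL.
 *
 *	void *vaddr = drm_gem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	// ... access the buffer through vaddr ...
 *	drm_gem_vunmap(obj, vaddr);
 */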

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
								 acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
							    acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
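
/*
 * Example (editor's sketch): the intended bracket around job submission in a
 * hypothetical scheduler backend. The ww-mutex retry dance above makes the
 * lock step safe against ABBA deadlocks between concurrent submitters.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret = drm_gem_lock_reservations(objs, count, &ctx);
 *
 *	if (ret)
 *		return ret;
 *	// ... reserve fence slots, push the job, install fences ...
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */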

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
						&fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
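
/*
 * Example (editor's sketch): collecting the implicit dependencies of every
 * BO of a job into an allocating xarray, as a hypothetical scheduler might
 * do between drm_gem_lock_reservations() and installing its own fences.
 * job->deps and job->bo_write[] are assumed driver-side bookkeeping.
 *
 *	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps, objs[i],
 *						       job->bo_write[i]);
 *		if (ret)
 *			break;
 *	}
 */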