xref: /openbmc/linux/drivers/gpu/drm/drm_gem_shmem_helper.c (revision 6c71a0574249f5e5a45fe055ab5f837023d5eeca)
12194a63aSNoralf Trønnes // SPDX-License-Identifier: GPL-2.0
22194a63aSNoralf Trønnes /*
32194a63aSNoralf Trønnes  * Copyright 2018 Noralf Trønnes
42194a63aSNoralf Trønnes  */
52194a63aSNoralf Trønnes 
62194a63aSNoralf Trønnes #include <linux/dma-buf.h>
72194a63aSNoralf Trønnes #include <linux/export.h>
84b2b5e14SThomas Zimmermann #include <linux/module.h>
92194a63aSNoralf Trønnes #include <linux/mutex.h>
102194a63aSNoralf Trønnes #include <linux/shmem_fs.h>
112194a63aSNoralf Trønnes #include <linux/slab.h>
122194a63aSNoralf Trønnes #include <linux/vmalloc.h>
138581fd40SJakub Kicinski #include <linux/module.h>
142194a63aSNoralf Trønnes 
15804b6e5eSDaniel Vetter #ifdef CONFIG_X86
16804b6e5eSDaniel Vetter #include <asm/set_memory.h>
17804b6e5eSDaniel Vetter #endif
18804b6e5eSDaniel Vetter 
19d3ea256aSSam Ravnborg #include <drm/drm.h>
202194a63aSNoralf Trønnes #include <drm/drm_device.h>
212194a63aSNoralf Trønnes #include <drm/drm_drv.h>
222194a63aSNoralf Trønnes #include <drm/drm_gem_shmem_helper.h>
232194a63aSNoralf Trønnes #include <drm/drm_prime.h>
242194a63aSNoralf Trønnes #include <drm/drm_print.h>
252194a63aSNoralf Trønnes 
2608e438e6SStephen Rothwell MODULE_IMPORT_NS(DMA_BUF);
2708e438e6SStephen Rothwell 
282194a63aSNoralf Trønnes /**
292194a63aSNoralf Trønnes  * DOC: overview
302194a63aSNoralf Trønnes  *
312194a63aSNoralf Trønnes  * This library provides helpers for GEM objects backed by shmem buffers
322194a63aSNoralf Trønnes  * allocated using anonymous pageable memory.
33a193f3b4SThomas Zimmermann  *
34a193f3b4SThomas Zimmermann  * Functions that operate on the GEM object receive struct &drm_gem_shmem_object.
35a193f3b4SThomas Zimmermann  * For GEM callback helpers in struct &drm_gem_object functions, see likewise
36a193f3b4SThomas Zimmermann  * named functions with an _object_ infix (e.g., drm_gem_shmem_object_vmap() wraps
37a193f3b4SThomas Zimmermann  * drm_gem_shmem_vmap()). These helpers perform the necessary type conversion.
382194a63aSNoralf Trønnes  */
392194a63aSNoralf Trønnes 
/*
 * Default &drm_gem_object_funcs for shmem-backed objects. Installed by
 * __drm_gem_shmem_create() only when the driver's gem_create_object()
 * hook did not set its own funcs table.
 */
static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_object_free,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = drm_gem_shmem_object_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};
512194a63aSNoralf Trønnes 
/*
 * __drm_gem_shmem_create - common allocation path for shmem GEM objects
 * @dev: DRM device
 * @size: requested size; rounded up to a page multiple
 * @private: true for dma-buf imported objects (no shmem backing file)
 *
 * Allocates the object (via the driver hook if present), initializes the
 * GEM base and creates the mmap offset. Returns the new object or an
 * ERR_PTR() on failure.
 */
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	/* Let the driver embed the shmem object in its own struct if it wants. */
	if (dev->driver->gem_create_object) {
		obj = dev->driver->gem_create_object(dev, size);
		if (IS_ERR(obj))
			return ERR_CAST(obj);
		shmem = to_drm_gem_shmem_obj(obj);
	} else {
		shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
		if (!shmem)
			return ERR_PTR(-ENOMEM);
		obj = &shmem->base;
	}

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private) {
		drm_gem_private_object_init(dev, obj, size);
		shmem->map_wc = false; /* dma-buf mappings use always writecombine */
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}
	/* ret can only be non-zero from drm_gem_object_init() above. */
	if (ret) {
		drm_gem_private_object_fini(obj);
		goto err_free;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	/*
	 * NOTE(review): kfree(obj) assumes the GEM object sits at the start
	 * of the driver's gem_create_object() allocation — confirm for
	 * drivers that embed it at a non-zero offset.
	 */
	kfree(obj);

	return ERR_PTR(ret);
}
1147d2cd72aSDaniel Vetter /**
1157d2cd72aSDaniel Vetter  * drm_gem_shmem_create - Allocate an object with the given size
1167d2cd72aSDaniel Vetter  * @dev: DRM device
1177d2cd72aSDaniel Vetter  * @size: Size of the object to allocate
1187d2cd72aSDaniel Vetter  *
1197d2cd72aSDaniel Vetter  * This function creates a shmem GEM object.
1207d2cd72aSDaniel Vetter  *
1217d2cd72aSDaniel Vetter  * Returns:
1227d2cd72aSDaniel Vetter  * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
1237d2cd72aSDaniel Vetter  * error code on failure.
1247d2cd72aSDaniel Vetter  */
drm_gem_shmem_create(struct drm_device * dev,size_t size)1257d2cd72aSDaniel Vetter struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
1267d2cd72aSDaniel Vetter {
1277d2cd72aSDaniel Vetter 	return __drm_gem_shmem_create(dev, size, false);
1287d2cd72aSDaniel Vetter }
1292194a63aSNoralf Trønnes EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
1302194a63aSNoralf Trønnes 
/**
 * drm_gem_shmem_free - Free resources associated with a shmem GEM object
 * @shmem: shmem GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself.
 */
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		/* Imported dma-buf: the sg table belongs to the attachment. */
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		dma_resv_lock(shmem->base.resv, NULL);

		/* A live vmap at free time means an unbalanced vmap/vunmap. */
		drm_WARN_ON(obj->dev, shmem->vmap_use_count);

		if (shmem->sgt) {
			dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);

		/* Page use count must have dropped to zero by now. */
		drm_WARN_ON(obj->dev, shmem->pages_use_count);

		dma_resv_unlock(shmem->base.resv);
	}

	drm_gem_object_release(obj);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
1672194a63aSNoralf Trønnes 
/*
 * Acquire a use-count reference on the backing pages, allocating them on
 * the first call. Caller must hold the object's reservation lock.
 * Returns 0 on success or a negative error code.
 */
static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	dma_resv_assert_held(shmem->base.resv);

	/* Fast path: pages already populated, just bump the count. */
	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
			    PTR_ERR(pages));
		/* Undo the speculative increment above. */
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	/*
	 * TODO: Allocating WC pages which are correctly flushed is only
	 * supported on x86. Ideal solution would be a GFP_WC flag, which also
	 * ttm_pool.c could use.
	 */
#ifdef CONFIG_X86
	if (shmem->map_wc)
		set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif

	shmem->pages = pages;

	return 0;
}
2002194a63aSNoralf Trønnes 
/*
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when use drops to zero.
 * Caller must hold the object's reservation lock.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	dma_resv_assert_held(shmem->base.resv);

	/* Unbalanced put: warn once and bail instead of underflowing. */
	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

#ifdef CONFIG_X86
	/* Restore write-back caching before the pages return to shmem. */
	if (shmem->map_wc)
		set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
2302194a63aSNoralf Trønnes 
drm_gem_shmem_pin_locked(struct drm_gem_shmem_object * shmem)23121aa27ddSDmitry Osipenko static int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)
23221aa27ddSDmitry Osipenko {
23321aa27ddSDmitry Osipenko 	int ret;
23421aa27ddSDmitry Osipenko 
23521aa27ddSDmitry Osipenko 	dma_resv_assert_held(shmem->base.resv);
23621aa27ddSDmitry Osipenko 
23721aa27ddSDmitry Osipenko 	ret = drm_gem_shmem_get_pages(shmem);
23821aa27ddSDmitry Osipenko 
23921aa27ddSDmitry Osipenko 	return ret;
24021aa27ddSDmitry Osipenko }
24121aa27ddSDmitry Osipenko 
/* Drop the pin's page reference; reservation lock must already be held. */
static void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem)
{
	dma_resv_assert_held(shmem->base.resv);

	drm_gem_shmem_put_pages(shmem);
}
24821aa27ddSDmitry Osipenko 
/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	/* Imported buffers are pinned by their exporter, never here. */
	drm_WARN_ON(obj->dev, obj->import_attach);

	/* Interruptible lock: may propagate -ERESTARTSYS to the caller. */
	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ret;
	ret = drm_gem_shmem_pin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
2752194a63aSNoralf Trønnes 
/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory.
 */
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	/* Imported buffers are pinned by their exporter, never here. */
	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_unpin_locked(shmem);
	dma_resv_unlock(shmem->base.resv);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
2942194a63aSNoralf Trønnes 
/*
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Returns the kernel virtual address of the SHMEM GEM object's backing
 *       store.
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object. It hides the differences
 * between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem,
		       struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (obj->import_attach) {
		/* Imported buffers are mapped by the exporting dma-buf. */
		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
		if (!ret) {
			/* Callers expect a system-memory address here. */
			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
				dma_buf_vunmap(obj->import_attach->dmabuf, map);
				return -EIO;
			}
		}
	} else {
		pgprot_t prot = PAGE_KERNEL;

		dma_resv_assert_held(shmem->base.resv);

		/* Already mapped: reuse the existing vmap, bump the count. */
		if (shmem->vmap_use_count++ > 0) {
			iosys_map_set_vaddr(map, shmem->vaddr);
			return 0;
		}

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (shmem->map_wc)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
		if (!shmem->vaddr)
			ret = -ENOMEM;
		else
			iosys_map_set_vaddr(map, shmem->vaddr);
	}

	if (ret) {
		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
		goto err_put_pages;
	}

	return 0;

err_put_pages:
	/* Only the native path above took a pages reference. */
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);
3642194a63aSNoralf Trønnes 
/*
 * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object
 * @shmem: shmem GEM object
 * @map: Kernel virtual address where the SHMEM GEM object was mapped
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function hides the differences between dma-buf imported and natively
 * allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
			  struct iosys_map *map)
{
	struct drm_gem_object *obj = &shmem->base;

	if (obj->import_attach) {
		dma_buf_vunmap(obj->import_attach->dmabuf, map);
	} else {
		dma_resv_assert_held(shmem->base.resv);

		/* Unbalanced vunmap: warn once and keep the mapping intact. */
		if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
			return;

		if (--shmem->vmap_use_count > 0)
			return;

		vunmap(shmem->vaddr);
		drm_gem_shmem_put_pages(shmem);
	}

	/* Reached only when the mapping is actually gone (count hit zero). */
	shmem->vaddr = NULL;
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
4002194a63aSNoralf Trønnes 
4013ad8173bSRob Clark static int
drm_gem_shmem_create_with_handle(struct drm_file * file_priv,struct drm_device * dev,size_t size,uint32_t * handle)4022194a63aSNoralf Trønnes drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
4032194a63aSNoralf Trønnes 				 struct drm_device *dev, size_t size,
4042194a63aSNoralf Trønnes 				 uint32_t *handle)
4052194a63aSNoralf Trønnes {
4062194a63aSNoralf Trønnes 	struct drm_gem_shmem_object *shmem;
4072194a63aSNoralf Trønnes 	int ret;
4082194a63aSNoralf Trønnes 
409cfe28f90SDaniel Vetter 	shmem = drm_gem_shmem_create(dev, size);
4102194a63aSNoralf Trønnes 	if (IS_ERR(shmem))
4113ad8173bSRob Clark 		return PTR_ERR(shmem);
4122194a63aSNoralf Trønnes 
4132194a63aSNoralf Trønnes 	/*
4142194a63aSNoralf Trønnes 	 * Allocate an id of idr table where the obj is registered
4152194a63aSNoralf Trønnes 	 * and handle has the id what user can see.
4162194a63aSNoralf Trønnes 	 */
4172194a63aSNoralf Trønnes 	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
4182194a63aSNoralf Trønnes 	/* drop reference from allocate - handle holds it now. */
419be6ee102SEmil Velikov 	drm_gem_object_put(&shmem->base);
4202194a63aSNoralf Trønnes 
4213ad8173bSRob Clark 	return ret;
4222194a63aSNoralf Trønnes }
4232194a63aSNoralf Trønnes 
42417acb9f3SRob Herring /* Update madvise status, returns true if not purged, else
42517acb9f3SRob Herring  * false or -errno.
42617acb9f3SRob Herring  */
drm_gem_shmem_madvise(struct drm_gem_shmem_object * shmem,int madv)427a193f3b4SThomas Zimmermann int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
42817acb9f3SRob Herring {
42921aa27ddSDmitry Osipenko 	dma_resv_assert_held(shmem->base.resv);
43017acb9f3SRob Herring 
43117acb9f3SRob Herring 	if (shmem->madv >= 0)
43217acb9f3SRob Herring 		shmem->madv = madv;
43317acb9f3SRob Herring 
43417acb9f3SRob Herring 	madv = shmem->madv;
43517acb9f3SRob Herring 
43617acb9f3SRob Herring 	return (madv >= 0);
43717acb9f3SRob Herring }
43817acb9f3SRob Herring EXPORT_SYMBOL(drm_gem_shmem_madvise);
43917acb9f3SRob Herring 
/*
 * Release all backing memory of a purgeable object (madvised DONTNEED).
 * Caller must hold the reservation lock. After this, shmem->madv is -1
 * and any userspace mappings are unmapped; faults will SIGBUS.
 */
void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct drm_device *dev = obj->dev;

	dma_resv_assert_held(shmem->base.resv);

	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));

	/* Tear down the device-DMA view of the pages first. */
	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages(shmem);

	/* Mark as purged; drm_gem_shmem_madvise() keeps this sticky. */
	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
47117acb9f3SRob Herring 
/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		/*
		 * NOTE(review): pitch * height is u32 arithmetic and can wrap
		 * for huge dimensions — presumably the dumb-create ioctl layer
		 * bounds these values earlier; confirm against the caller.
		 */
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}

	return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
5082194a63aSNoralf Trønnes 
/*
 * Page-fault handler for userspace mappings: inserts the PFN of the
 * already-populated backing page. SIGBUS for out-of-range offsets,
 * missing pages, or purged objects.
 */
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	vm_fault_t ret;
	struct page *page;
	pgoff_t page_offset;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* Serialize against purge and put_pages. */
	dma_resv_lock(shmem->base.resv, NULL);

	if (page_offset >= num_pages ||
	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
	    shmem->madv < 0) {
		ret = VM_FAULT_SIGBUS;
	} else {
		page = shmem->pages[page_offset];

		ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
	}

	dma_resv_unlock(shmem->base.resv);

	return ret;
}
5382194a63aSNoralf Trønnes 
/* VMA open hook: take an extra pages reference for the duplicated mapping. */
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	/* Imported buffers never use this vm_ops table. */
	drm_WARN_ON(obj->dev, obj->import_attach);

	dma_resv_lock(shmem->base.resv, NULL);

	/*
	 * We should have already pinned the pages when the buffer was first
	 * mmap'd, vm_open() just grabs an additional reference for the new
	 * mm the vma is getting copied into (ie. on fork()).
	 */
	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
		shmem->pages_use_count++;

	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_open(vma);
}
5602194a63aSNoralf Trønnes 
/* VMA close hook: drop the pages reference taken at mmap/vm_open time. */
static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	dma_resv_lock(shmem->base.resv, NULL);
	drm_gem_shmem_put_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	drm_gem_vm_close(vma);
}
5722194a63aSNoralf Trønnes 
/* vm_operations for userspace mappings of natively allocated shmem objects. */
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
5792194a63aSNoralf Trønnes 
/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @shmem: shmem GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Imported buffers are delegated to the
 * dma-buf exporter; native buffers get their pages pinned and the VMA set
 * up for faulting through drm_gem_shmem_vm_ops.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;

	if (obj->import_attach) {
		/* Reset both vm_ops and vm_private_data, so we don't end up with
		 * vm_ops pointing to our implementation if the dma-buf backend
		 * doesn't set those fields.
		 */
		vma->vm_private_data = NULL;
		vma->vm_ops = NULL;

		ret = dma_buf_mmap(obj->dma_buf, vma, 0);

		/* Drop the reference drm_gem_mmap_obj() acquired.*/
		if (!ret)
			drm_gem_object_put(obj);

		return ret;
	}

	/* Private writable mappings can't be shared with the backing pages. */
	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	/* Pin the pages for the lifetime of the mapping; vm_close drops them. */
	dma_resv_lock(shmem->base.resv, NULL);
	ret = drm_gem_shmem_get_pages(shmem);
	dma_resv_unlock(shmem->base.resv);

	if (ret)
		return ret;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	/* Honor the object's caching mode for the userspace mapping. */
	if (shmem->map_wc)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
6312194a63aSNoralf Trønnes 
6322194a63aSNoralf Trønnes /**
6332194a63aSNoralf Trønnes  * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
634a193f3b4SThomas Zimmermann  * @shmem: shmem GEM object
6352194a63aSNoralf Trønnes  * @p: DRM printer
6362194a63aSNoralf Trønnes  * @indent: Tab indentation level
6372194a63aSNoralf Trønnes  */
drm_gem_shmem_print_info(const struct drm_gem_shmem_object * shmem,struct drm_printer * p,unsigned int indent)638a193f3b4SThomas Zimmermann void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
639a193f3b4SThomas Zimmermann 			      struct drm_printer *p, unsigned int indent)
6402194a63aSNoralf Trønnes {
64167fe7487SDmitry Osipenko 	if (shmem->base.import_attach)
64267fe7487SDmitry Osipenko 		return;
64367fe7487SDmitry Osipenko 
6442194a63aSNoralf Trønnes 	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
6452194a63aSNoralf Trønnes 	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
6462194a63aSNoralf Trønnes 	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
6472194a63aSNoralf Trønnes }
6482194a63aSNoralf Trønnes EXPORT_SYMBOL(drm_gem_shmem_print_info);
6492194a63aSNoralf Trønnes 
6502194a63aSNoralf Trønnes /**
6512194a63aSNoralf Trønnes  * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
6522194a63aSNoralf Trønnes  *                              pages for a shmem GEM object
653a193f3b4SThomas Zimmermann  * @shmem: shmem GEM object
6542194a63aSNoralf Trønnes  *
6552194a63aSNoralf Trønnes  * This function exports a scatter/gather table suitable for PRIME usage by
656c7fbcb71SThomas Zimmermann  * calling the standard DMA mapping API.
6570b638559SDaniel Vetter  *
6580b638559SDaniel Vetter  * Drivers who need to acquire an scatter/gather table for objects need to call
6590b638559SDaniel Vetter  * drm_gem_shmem_get_pages_sgt() instead.
6602194a63aSNoralf Trønnes  *
6612194a63aSNoralf Trønnes  * Returns:
6622b8428a1SLiu Zixian  * A pointer to the scatter/gather table of pinned pages or error pointer on failure.
6632194a63aSNoralf Trønnes  */
drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object * shmem)664a193f3b4SThomas Zimmermann struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
6652194a63aSNoralf Trønnes {
666a193f3b4SThomas Zimmermann 	struct drm_gem_object *obj = &shmem->base;
6672194a63aSNoralf Trønnes 
6683f6a1e22SDmitry Osipenko 	drm_WARN_ON(obj->dev, obj->import_attach);
66952640835SDaniel Vetter 
670707d561fSGerd Hoffmann 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
6712194a63aSNoralf Trønnes }
6722194a63aSNoralf Trønnes EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
6732194a63aSNoralf Trønnes 
/*
 * Create (or return the cached) dma-mapped sg table for a native shmem
 * object. Caller must hold the object's reservation lock.
 */
static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret;
	struct sg_table *sgt;

	/* The table is cached after the first call. */
	if (shmem->sgt)
		return shmem->sgt;

	/* Imported objects get their sgt from the exporter, never from here. */
	drm_WARN_ON(obj->dev, obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_free_sgt;

	shmem->sgt = sgt;

	return sgt;

err_free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
710ddddedaaSAsahi Lina 
/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *				 scatter/gather table for a shmem GEM object.
 * @shmem: shmem GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
{
	int ret;
	struct sg_table *sgt;

	/* Interruptible so a blocked caller can be killed from userspace. */
	ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
	if (ret)
		return ERR_PTR(ret);
	sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
	dma_resv_unlock(shmem->base.resv);

	return sgt;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
7412194a63aSNoralf Trønnes 
/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	/* "private" object: no shmem file is allocated for imported buffers. */
	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	/* Keep the exporter's table; it is released on object free. */
	shmem->sgt = sgt;

	drm_dbg_prime(dev, "size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
7764b2b5e14SThomas Zimmermann 
7774b2b5e14SThomas Zimmermann MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
77867505311SMarcel Ziswiler MODULE_IMPORT_NS(DMA_BUF);
7794b2b5e14SThomas Zimmermann MODULE_LICENSE("GPL v2");
780