xref: /openbmc/linux/drivers/gpu/drm/drm_gem.c (revision bcc5c9d50e93bb7d949f6f38063b62dd35ca84d1)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
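
/*
 * Illustrative sketch (not part of the original file, kept disabled): how a
 * driver typically combines the helpers below to implement an object-create
 * ioctl that hands the fd-like handle described above back to userspace.
 * The foo_gem_create() name is hypothetical.
 */
#if 0
static int foo_gem_create(struct drm_device *dev, struct drm_file *file_priv,
			  size_t size, u32 *handlep)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* Set up page-aligned shmem backing store behind the object. */
	ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
	if (ret) {
		kfree(obj);
		return ret;
	}

	/* Publish a handle; this takes its own reference on @obj. */
	ret = drm_gem_handle_create(file_priv, obj, handlep);

	/* Drop the creation reference; the handle keeps the object alive. */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
#endif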

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit.
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
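
/*
 * Illustrative sketch (not part of the original file, kept disabled): drivers
 * that manage their own storage (e.g. VRAM, or an imported dma-buf) skip the
 * shmem setup above and use drm_gem_private_object_init() instead.  The
 * foo_gem_create_private() name is hypothetical; note the size handed in
 * must be page-aligned.
 */
#if 0
static struct drm_gem_object *foo_gem_create_private(struct drm_device *dev,
						     size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	/* No obj->filp is created; the driver supplies the backing memory. */
	drm_gem_private_object_init(dev, obj, PAGE_ALIGN(size));
	return obj;
}
#endif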

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards,
 * since the handle now keeps it alive.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

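/*
 * Illustrative sketch (not part of the original file, kept disabled): a
 * driver's ->dumb_map_offset() style callback usually pairs
 * drm_gem_create_mmap_offset() with drm_vma_node_offset_addr() to hand the
 * fake offset back to userspace for the subsequent mmap(2) call.  The
 * foo_gem_map_offset() name and its parameters are hypothetical.
 */
#if 0
static int foo_gem_map_offset(struct drm_file *file, struct drm_device *dev,
			      u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj)
		return -ENOENT;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_create_mmap_offset(obj);
	if (!ret)
		*offset = drm_vma_node_offset_addr(&obj->vma_node);
	mutex_unlock(&dev->struct_mutex);

	/* Drop the reference taken by the lookup. */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
#endif
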
/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue with drivers that require
		 * buffer memory in the low 4GB: if the pages are
		 * unpinned and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp() will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * the driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
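
/*
 * Illustrative sketch (not part of the original file, kept disabled): a
 * simple pin/unpin pair built on drm_gem_get_pages()/drm_gem_put_pages().
 * A real driver would typically also build an sg table and cache the array;
 * the foo_* names and the foo_gem_object container are hypothetical.
 */
#if 0
struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;
};

static int foo_gem_pin_pages(struct foo_gem_object *foo)
{
	struct page **pages;

	pages = drm_gem_get_pages(&foo->base, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	foo->pages = pages;
	return 0;
}

static void foo_gem_unpin_pages(struct foo_gem_object *foo)
{
	/* Write back the pages and drop the references taken at get time. */
	drm_gem_put_pages(&foo->base, foo->pages, true, false);
	foo->pages = NULL;
}
#endif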

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
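
/*
 * Illustrative sketch (not part of the original file, kept disabled): the
 * canonical lookup/use/unreference pattern for an ioctl that operates on an
 * object named by a handle.  foo_gem_query_ioctl() and its args struct are
 * hypothetical.
 */
#if 0
struct drm_foo_query {
	u32 handle;
	u64 size;
};

static int foo_gem_query_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct drm_foo_query *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	args->size = obj->size;

	/* Drop the reference taken by the lookup. */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}
#endif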

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	spin_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds
		 * one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
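
/*
 * Illustrative sketch (not part of the original file, kept disabled): a
 * driver's dma-buf ->mmap() callback is the typical caller of
 * drm_gem_mmap_obj(), taking struct_mutex around it as required above.
 * foo_gem_dmabuf_mmap() is a hypothetical name.
 */
#if 0
static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
#endif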

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);