/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}
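
/*
 * Illustrative sketch (not part of this file): the handle scheme described
 * in the header comment above leads to a typical driver flow of allocating
 * an object, exposing it to userspace via a handle and then dropping the
 * local reference, so the handle alone keeps the object alive. This assumes
 * a page-aligned size and that the driver's gem_free_object callback frees
 * the object; "foo_create_handle" is a made-up name:
 *
 *	int foo_create_handle(struct drm_device *dev, struct drm_file *file,
 *			      size_t size, u32 *handlep)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, obj, size);
 *		if (ret) {
 *			kfree(obj);
 *			return ret;
 *		}
 *
 *		ret = drm_gem_handle_create(file, obj, handlep);
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */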
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
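
/*
 * Illustrative sketch (not part of this file): a driver that provides its
 * own backing storage, for example for an imported dma-buf, would embed
 * struct drm_gem_object in its own buffer structure and use
 * drm_gem_private_object_init() instead of drm_gem_object_init(). The
 * "foo_bo" structure and function below are made up:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		struct sg_table *sgt;
 *	};
 *
 *	static struct foo_bo *foo_bo_create_private(struct drm_device *dev,
 *						    size_t size)
 *	{
 *		struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		drm_gem_private_object_init(dev, &bo->base, size);
 *		return bo;
 *	}
 */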
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;

fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0)
		drm_gem_object_handle_free(obj);
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
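
/*
 * Illustrative sketch (not part of this file): a GEM-based driver can wire
 * drm_gem_dumb_destroy() straight into its drm_driver as the dumb-buffer
 * destroy hook. The "foo_*" names below are made up:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = foo_dumb_create,
 *		.dumb_map_offset = foo_dumb_map_offset,
 *		.dumb_destroy	 = drm_gem_dumb_destroy,
 *	};
 */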
/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size). Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
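
/*
 * Illustrative sketch (not part of this file): a driver's dumb_map_offset
 * hook would attach a fake offset and hand it back to userspace with
 * drm_vma_node_offset_addr(), so userspace can pass it to mmap(2). The
 * "foo_dumb_map_offset" name is made up and error handling is simplified:
 *
 *	static int foo_dumb_map_offset(struct drm_file *file,
 *				       struct drm_device *dev,
 *				       u32 handle, u64 *offset)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file, handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (!ret)
 *			*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */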
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB. If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue. But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
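
/*
 * Illustrative sketch (not part of this file): drm_gem_get_pages() and
 * drm_gem_put_pages() are meant to be used in pairs around pinning and
 * unpinning a shmem-backed object. The "foo_bo" structure with a "pages"
 * member is made up, and the GFP_KERNEL gfpmask is just one plausible
 * choice, since the helper ORs in the mapping's own gfp mask anyway:
 *
 *	static int foo_bo_pin_pages(struct foo_bo *bo)
 *	{
 *		struct page **pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);
 *
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_bo_unpin_pages(struct foo_bo *bo)
 *	{
 *		// dirty + accessed so swap-out writes the data back
 *		drm_gem_put_pages(&bo->base, bo->pages, true, true);
 *		bo->pages = NULL;
 *	}
 */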
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
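
/*
 * Illustrative sketch (not part of this file): drivers typically plug
 * drm_gem_vm_open()/drm_gem_vm_close() straight into their gem_vm_ops so
 * that every vma created for a GEM object holds a reference on it. The
 * "foo_gem_fault" fault handler is a made-up driver function:
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open  = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */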
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
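
/*
 * Illustrative sketch (not part of this file): drm_gem_mmap_obj() lets a
 * driver implement the dma-buf mmap callback, where the object is reached
 * through dma_buf->priv rather than a fake offset. Per the NOTE above, the
 * call is made under dev->struct_mutex. "foo_gem_prime_mmap" is a made-up
 * name:
 *
 *	static int foo_gem_prime_mmap(struct dma_buf *dma_buf,
 *				      struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		int ret;
 *
 *		mutex_lock(&obj->dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&obj->dev->struct_mutex);
 *
 *		return ret;
 *	}
 */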
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
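
/*
 * Illustrative sketch (not part of this file): drm_gem_mmap() is reached
 * through the driver's file_operations, so mmap(2) on the DRM device node
 * lands here. "foo_driver_fops" is a made-up name; the helpers referenced
 * are standard DRM core entry points:
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */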