/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
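/*
 * Illustrative sketch (not part of the core code): a driver's "create"
 * ioctl typically chains the helpers below roughly as follows; "args" is
 * a hypothetical driver-specific argument struct:
 *
 *	obj = drm_gem_object_alloc(dev, roundup(args->size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 *	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
 *	drm_gem_object_unreference_unlocked(obj);
 *	if (ret)
 *		return ret;
 *
 * The handle created for userspace keeps the object alive; the allocation
 * reference is dropped once the handle exists.
 */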
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
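/*
 * Worked example (for orientation only): with 4 KiB pages, a 64-bit kernel
 * starts fake offsets at page 0x100000 (byte offset 4 GiB) and reserves
 * about 16 million pages (~64 GiB) of fake-offset space, while a 32-bit
 * kernel starts at page 0x10000 (byte offset 256 MiB), so the largest fake
 * page offset stays comfortably within an unsigned long pgoff.
 */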
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	if (drm_ht_create(&mm->offset_hash, 19)) {
		kfree(mm);
		return -ENOMEM;
	}

	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
			DRM_FILE_PAGE_OFFSET_SIZE)) {
		drm_ht_remove(&mm->offset_hash);
		kfree(mm);
		return -ENOMEM;
	}

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(obj->filp))
		return -ENOMEM;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

/**
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	drm_gem_object_handle_reference(obj);
	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

again:
	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
					&obj->name);
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);

		if (ret == -EAGAIN)
			goto again;

		if (ret != 0)
			goto err;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	} else {
		args->name = (uint64_t) obj->name;
		spin_unlock(&dev->object_name_lock);
		ret = 0;
	}

err:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
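/*
 * Usage note (illustrative, from the userspace side): buffer sharing by
 * global name pairs the two ioctls above.  The exporting process converts
 * a handle into a name and passes that name out of band, e.g.:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	send_to_peer(flink.name);		// hypothetical transport
 *
 * and the importing process turns the name back into a local handle:
 *
 *	struct drm_gem_open open_arg = { .name = received_name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	// open_arg.handle and open_arg.size now describe the shared object
 */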
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_gem_object *obj = ptr;

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, NULL);

	idr_remove_all(&file_private->object_idr);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
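/*
 * Illustrative sketch (the foo_* names are hypothetical): a driver's
 * gem_free_object callback usually tears down driver-private state,
 * releases the shmfs backing via drm_gem_object_release(), and frees the
 * containing structure:
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		foo_release_backing_pages(bo);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */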
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_close_locked(vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
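/*
 * Illustrative only: a driver that supports GEM mmap typically points
 * struct drm_driver::gem_vm_ops at a table that wires these helpers to
 * its own fault handler (foo_gem_fault is a hypothetical name):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */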
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * If we find the object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object), we set up the driver fault handler so that any accesses
 * to the object can be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_local_map *map = NULL;
	struct drm_gem_object *obj;
	struct drm_hash_item *hash;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map ||
	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
		ret = -EPERM;
		goto out_unlock;
	}

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start) {
		ret = -EINVAL;
		goto out_unlock;
	}

	obj = map->handle;
	if (!obj->dev->driver->gem_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
	vma->vm_ops = obj->dev->driver->gem_vm_ops;
	vma->vm_private_data = map->handle;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);

out_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
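/*
 * Userspace view, for reference (illustrative; the ioctl that hands back
 * the fake offset is driver-specific, e.g. an "mmap_gtt"-style ioctl):
 *
 *	uint64_t offset = ...;	// fake offset returned by the driver ioctl
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 *
 * The resulting vma is routed through drm_gem_mmap() above, which looks
 * the object up by that offset and installs the driver's fault handler.
 */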