/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
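
/*
 * Worked example, assuming the common PAGE_SHIFT of 12 (4 KiB pages):
 * on 64-bit, the window starts at (0xFFFFFFFF >> 12) + 1 = 0x100000 pages,
 * i.e. the fake offsets begin at byte offset 4 GiB, and span
 * 0xFFFFF * 16 pages (roughly 64 GiB of offset space).  On 32-bit, the
 * window starts at (0xFFFFFFF >> 12) + 1 = 0x10000 pages (byte offset
 * 256 MiB) and spans 0xFFFF * 16 pages, so the largest faked pgoff stays
 * well within the 32-bit unsigned long that mmap hands us.
 */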

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	spin_lock_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}

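/*
 * drm_gem_init()/drm_gem_destroy() are invoked by the DRM core rather than
 * by drivers directly; a driver opts in by setting DRIVER_GEM in its
 * feature flags.  A minimal sketch (the "foo" names are hypothetical):
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.gem_free_object = foo_gem_free_object,
 *		...
 *	};
 */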

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
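
/*
 * Typical use: a driver embeds struct drm_gem_object in its own buffer
 * object and initializes it with a page-aligned size.  A minimal sketch
 * (the "foo" names are hypothetical):
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		... driver-private state ...
 *	};
 *
 *	struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *	if (!bo)
 *		return ERR_PTR(-ENOMEM);
 *	ret = drm_gem_object_init(dev, &bo->base, roundup(size, PAGE_SIZE));
 *	if (ret) {
 *		kfree(bo);
 *		return ERR_PTR(ret);
 *	}
 */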

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM-provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	atomic_set(&obj->handle_count, 0);
	obj->size = size;
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_buf_handle(&filp->prime,
				obj->export_dma_buf);
	}
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their own reference to the object afterwards,
 * since the handle now keeps it alive.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
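
/*
 * Typical use of drm_gem_handle_create(), e.g. from a ->dumb_create
 * implementation: allocate and initialize the object, publish it as a
 * handle, then drop the local reference so the handle owns the object.
 * A minimal sketch (the "foo" names are hypothetical):
 *
 *	static int foo_dumb_create(struct drm_file *file,
 *				   struct drm_device *dev,
 *				   struct drm_mode_create_dumb *args)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
 *		args->size = PAGE_ALIGN(args->pitch * args->height);
 *
 *		obj = drm_gem_object_alloc(dev, args->size);
 *		if (!obj)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_handle_create(file, obj, &args->handle);
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */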

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  obj->size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
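
/*
 * Typical use: a ->dumb_map_offset implementation creates the fake offset
 * on demand and reports it back to userspace.  A minimal sketch
 * (the "foo" names are hypothetical):
 *
 *	static int foo_dumb_map_offset(struct drm_file *file,
 *				       struct drm_device *dev,
 *				       uint32_t handle, uint64_t *offset)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file, handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (!ret)
 *			*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */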

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
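
/*
 * The reference returned by drm_gem_object_lookup() must be dropped by the
 * caller once it is done with the object, following the pattern used by
 * the ioctls below:
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *	... operate on obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */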

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not keep the object alive on its own; the name
 * (and the reference held by the name table) is destroyed once the last
 * handle to the object is closed.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	idr_preload(GFP_KERNEL);
	spin_lock(&dev->object_name_lock);
	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	spin_unlock(&dev->object_name_lock);
	idr_preload_end();
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
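
/*
 * From userspace, flink is reached through DRM_IOCTL_GEM_FLINK.  A minimal
 * sketch of exporting a handle as a global name:
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) == 0)
 *		printf("global name %u\n", flink.name);
 */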

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	spin_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	spin_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
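
/*
 * The matching import side uses DRM_IOCTL_GEM_OPEN, typically from a second
 * process that received the global name.  A minimal sketch:
 *
 *	struct drm_gem_open open_arg = { .name = name };
 *
 *	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) == 0)
 *		printf("handle %u, size %llu\n", open_arg.handle,
 *		       (unsigned long long)open_arg.size);
 */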

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
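
/*
 * A driver's ->gem_free_object callback usually undoes what its allocation
 * path did: release the mmap offset if one was created, let GEM drop the
 * shmem backing, then free the driver wrapper.  A minimal sketch
 * (the "foo" names are hypothetical):
 *
 *	static void foo_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = container_of(obj, struct foo_bo, base);
 *
 *		drm_gem_free_mmap_offset(obj);
 *		drm_gem_object_release(obj);
 *		kfree(bo);
 *	}
 */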

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	spin_lock(&dev->object_name_lock);
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		spin_unlock(&dev->object_name_lock);
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle
		 * holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	} else
		spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
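
/*
 * Drivers typically wire these helpers into their gem_vm_ops, keeping only
 * the fault handler driver-specific.  A minimal sketch (foo_gem_fault is
 * hypothetical):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */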

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * NOTE: This function has to be called with dev->struct_mutex held.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
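
/*
 * Typical use from a dma-buf exporter: the driver's dmabuf ->mmap callback
 * already knows which GEM object backs the buffer, so it can skip the
 * offset lookup and call drm_gem_mmap_obj() directly.  A minimal sketch
 * (the "foo" names are hypothetical):
 *
 *	static int foo_dmabuf_mmap(struct dma_buf *dma_buf,
 *				   struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		struct drm_device *dev = obj->dev;
 *		int ret;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&dev->struct_mutex);
 *		return ret;
 *	}
 */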

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
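
/*
 * drm_gem_mmap() is normally wired straight into the driver's
 * file_operations, so userspace mmap(2) on the DRM fd reaches it directly.
 * A minimal sketch (the "foo" name is hypothetical):
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *		.poll = drm_poll,
 *		.read = drm_read,
 *	};
 */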