/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

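/*
 * Convert an errno returned by the buffer-mapping path into the
 * VM_FAULT_* code that the page fault handler hands back to the VM.
 */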
static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

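/* Reject any request that carries flags outside EXYNOS_BO_MASK. */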
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

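/*
 * Derive vma->vm_page_prot from the buffer's cache attribute:
 * cacheable mappings keep the default protection, write-combined and
 * non-cacheable mappings get the matching pgprot variant.
 */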
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	/* TODO */

	return roundup(size, PAGE_SIZE);
}

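/*
 * Walk the buffer's scatter-gather table to the entry that covers
 * page_offset and insert the corresponding pfn at the faulting
 * address of the user mapping.
 */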
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned long pfn;
	int i;

	/*
	 * return -EINVAL, not -EINTR: -EINTR would be converted to
	 * VM_FAULT_NOPAGE and the fault would be retried forever.
	 */
	if (!buf->sgt)
		return -EINVAL;

	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return -EINVAL;
	}

	sgl = buf->sgt->sgl;
	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
		if (page_offset < (sgl->length >> PAGE_SHIFT))
			break;
		page_offset -= (sgl->length >> PAGE_SHIFT);
	}

	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

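/*
 * Release the backing buffer (unless it came from a dma-buf import),
 * drop the mmap offset and free the gem object itself.
 */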
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * do not release the memory region of an imported buffer here.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		/* also free the object itself to avoid leaking it. */
		kfree(exynos_gem_obj);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

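/*
 * Look up a gem object by handle and return the dma address of its
 * buffer. The lookup takes a reference on the object; the matching
 * exynos_drm_gem_put_dma_addr() call releases it.
 */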
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

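/*
 * Find the drm_file that belongs to the given struct file by scanning
 * the device's filelist. Needed because exynos_drm_gem_mmap_ioctl()
 * temporarily replaces filp->private_data with the gem object.
 */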
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
							struct file *filp)
{
	struct drm_file *file_priv;

	mutex_lock(&drm_dev->struct_mutex);

	/* find current process's drm_file from filelist. */
	list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
		if (file_priv->filp == filp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return file_priv;
		}
	}

	mutex_unlock(&drm_dev->struct_mutex);
	WARN_ON(1);

	return ERR_PTR(-EFAULT);
}

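/*
 * mmap handler reached through the temporary exynos_drm_gem_fops set
 * up by exynos_drm_gem_mmap_ioctl(). It restores the file's original
 * fops and private_data, then maps the whole buffer with
 * dma_mmap_attrs().
 */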
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct drm_device *drm_dev = obj->dev;
	struct exynos_drm_gem_buf *buffer;
	struct drm_file *file_priv;
	unsigned long vm_size;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_ops = drm_dev->driver->gem_vm_ops;

	/* restore it to driver's fops. */
	filp->f_op = fops_get(drm_dev->driver->fops);

	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
	if (IS_ERR(file_priv))
		return PTR_ERR(file_priv);

	/* restore it to drm_file. */
	filp->private_data = file_priv;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * the buffer points to physically contiguous memory allocated
	 * by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	/*
	 * take a reference to this mapping of the object; it is dropped
	 * by the corresponding vm_close call.
	 */
	drm_gem_object_reference(obj);

	mutex_lock(&drm_dev->struct_mutex);
	drm_vm_open_locked(drm_dev, vma);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;
}

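/*
 * Temporary fops installed on the drm file while the gem mmap ioctl
 * runs, so that vm_mmap() ends up in the handler above.
 */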
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	/*
	 * Set the specific mmaper's fops temporarily; it will be restored
	 * to dev->driver->fops by exynos_drm_gem_mmap_buffer().
	 */
	file_priv->filp->f_op = &exynos_drm_gem_fops;

	/*
	 * Set the gem object to private_data so that the specific mmaper
	 * can get it; it will be restored to drm_file by
	 * exynos_drm_gem_mmap_buffer().
	 */
	file_priv->filp->private_data = obj;

	addr = vm_mmap(file_priv->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr)) {
		/*
		 * if vm_mmap() failed before reaching the mmap handler,
		 * f_op and private_data were never restored; do it here.
		 */
		if (file_priv->filp->f_op == &exynos_drm_gem_fops)
			file_priv->filp->f_op = fops_get(dev->driver->fops);
		file_priv->filp->private_data = file_priv;
		return PTR_ERR((void *)addr);
	}

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

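/*
 * Duplicate a vm_area_struct for userptr handling. The copy pins the
 * backing file and calls the vma's open hook so it stays usable after
 * the original mapping goes away; exynos_gem_put_vma() undoes this.
 */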
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

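/*
 * Pin the user pages backing [start, start + npages * PAGE_SIZE).
 * For VM_PFNMAP regions the pfns are resolved with follow_pfn();
 * otherwise the pages are taken with get_user_pages().
 */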
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region mmapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}

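/*
 * Map a scatter-gather table for DMA on the drm device, serialized
 * against other mappers by struct_mutex.
 */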
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		/* dma_map_sg() returns 0 on failure; do not return it as success. */
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(exynos_gem_obj);
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	/* pitch is the row size in bytes: width times bpp rounded up to bytes. */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

668 
669 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
670 				   struct drm_device *dev, uint32_t handle,
671 				   uint64_t *offset)
672 {
673 	struct drm_gem_object *obj;
674 	int ret = 0;
675 
676 	DRM_DEBUG_KMS("%s\n", __FILE__);
677 
678 	mutex_lock(&dev->struct_mutex);
679 
680 	/*
681 	 * get offset of memory allocated for drm framebuffer.
682 	 * - this callback would be called by user application
683 	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
684 	 */
685 
686 	obj = drm_gem_object_lookup(dev, file_priv, handle);
687 	if (!obj) {
688 		DRM_ERROR("failed to lookup gem object.\n");
689 		ret = -EINVAL;
690 		goto unlock;
691 	}
692 
693 	if (!obj->map_list.map) {
694 		ret = drm_gem_create_mmap_offset(obj);
695 		if (ret)
696 			goto out;
697 	}
698 
699 	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
700 	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
701 
702 out:
703 	drm_gem_object_unreference(obj);
704 unlock:
705 	mutex_unlock(&dev->struct_mutex);
706 	return ret;
707 }
708 
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

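/*
 * Page fault handler for mmapped gem objects: compute the page offset
 * within the buffer from the faulting address, map the matching page
 * and translate the result into a VM_FAULT_* code.
 */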
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map a buffer with user.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

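/*
 * fops->mmap entry point for gem objects: let drm_gem_mmap() set up
 * the vma, then switch it from VM_PFNMAP to VM_MIXEDMAP (pages are
 * inserted one by one in the fault handler) and apply the buffer's
 * cache attribute.
 */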
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}