/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cachable as default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
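
/*
 * Illustrative note (not from the original source): a client that wants a
 * physically non-contiguous, CPU-cached buffer would pass
 * EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE in struct drm_exynos_gem_create,
 * in which case update_vm_cache_attr() above leaves vm_page_prot at its
 * cached default when the buffer is later mmapped; EXYNOS_BO_WC alone
 * yields a contiguous, write-combined mapping instead.
 */
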
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}
out:
	return roundup(size, PAGE_SIZE);
}

struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct page *p, **pages;
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		p = alloc_page(gfpmask);
		/* alloc_page() returns NULL on failure, not an ERR_PTR. */
		if (!p)
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	/* free every page allocated so far, down to and including pages[0]. */
	while (--i >= 0)
		__free_page(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages)
{
	int npages;

	npages = obj->size >> PAGE_SHIFT;

	while (--npages >= 0)
		__free_page(pages[npages]);

	drm_free_large(pages);
}

static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	/* propagate the real error instead of clobbering it with -EFAULT. */
	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* set all pages to sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* TODO: handle the UNCACHED type here. */

	buf->pages = pages;
	return ret;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages);
	return ret;
}
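
/*
 * Worked example (illustrative): for a 12KB non-contiguous object on a
 * system with 4KB pages, exynos_drm_gem_get_pages() above allocates
 * npages = 3 individual pages and builds an sg_table with three entries,
 * each of length PAGE_SIZE, so the backing memory never needs to be
 * physically contiguous.
 */
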
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG then release all the
	 * pages allocated at the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages);
	buf->pages = NULL;

	/* TODO: handle the UNCACHED type here. */
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table, under which the obj is
	 * registered, and return it through handle so that userspace
	 * can refer to the object by it.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if (!buf->pages)
		return;

	/*
	 * do not release memory region from exporter.
	 *
	 * the region will be released by exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}
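
/*
 * Illustrative note on the import path above: for a buffer imported from
 * another device via PRIME, exynos_drm_gem_destroy() only tears down its
 * own bookkeeping (buffer struct, mmap offset, gem object); the backing
 * pages stay alive until the exporting driver's dma-buf refcount drops
 * to zero.
 */
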
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* validate the flags before they are used for the size roundup. */
	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	size = roundup_gem_size(size, flags);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages as desired size if user wants to allocate
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG)
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
	else
		ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		/* release() does not free the container, so do it here. */
		kfree(exynos_gem_obj);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
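
/*
 * In-kernel usage sketch (illustrative only): callers get either a valid
 * object or an ERR_PTR-encoded errno, never NULL:
 *
 *	struct exynos_drm_gem_obj *gem;
 *
 *	gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, SZ_1M);
 *	if (IS_ERR(gem))
 *		return PTR_ERR(gem);
 */
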
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * drop the reference one more time because we have already
	 * taken it in exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
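
/*
 * In-kernel usage sketch (illustrative): a device driver needing the bus
 * address of a contiguous buffer would pair the two calls above:
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(dev, handle, file_priv);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	...program the hardware with *addr...
 *	exynos_drm_gem_put_dma_addr(dev, handle, file_priv);
 *
 * Every successful get must be paired with a put, since get leaves the
 * gem object's reference count raised until put drops it again.
 */
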
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes the memory region allocated by user
	 * request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		/* map the non-contiguous buffer one page at a time. */
		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physically contiguous
		 * memory to be mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
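
/*
 * Illustrative summary of the two mapping strategies above: non-contiguous
 * buffers are mapped page by page with vm_insert_page() (hence the
 * VM_MIXEDMAP flag), while contiguous buffers are mapped in one shot by
 * handing remap_pfn_range() the pfn derived from the buffer's dma_addr.
 */
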
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	/* vm_mmap() returns an unsigned long; do not truncate it. */
	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
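
/*
 * Illustrative userspace sequence for the dumb-buffer path above
 * (not driver code):
 *
 *	struct drm_mode_create_dumb arg = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg);
 *
 * On return the kernel has filled in arg.handle, arg.pitch and arg.size
 * as computed by exynos_drm_gem_dumb_create().
 */
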
662 */ 663 664 args->pitch = args->width * ((args->bpp + 7) / 8); 665 args->size = PAGE_ALIGN(args->pitch * args->height); 666 667 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); 668 if (IS_ERR(exynos_gem_obj)) 669 return PTR_ERR(exynos_gem_obj); 670 671 ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv, 672 &args->handle); 673 if (ret) { 674 exynos_drm_gem_destroy(exynos_gem_obj); 675 return ret; 676 } 677 678 return 0; 679 } 680 681 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, 682 struct drm_device *dev, uint32_t handle, 683 uint64_t *offset) 684 { 685 struct drm_gem_object *obj; 686 int ret = 0; 687 688 DRM_DEBUG_KMS("%s\n", __FILE__); 689 690 mutex_lock(&dev->struct_mutex); 691 692 /* 693 * get offset of memory allocated for drm framebuffer. 694 * - this callback would be called by user application 695 * with DRM_IOCTL_MODE_MAP_DUMB command. 696 */ 697 698 obj = drm_gem_object_lookup(dev, file_priv, handle); 699 if (!obj) { 700 DRM_ERROR("failed to lookup gem object.\n"); 701 ret = -EINVAL; 702 goto unlock; 703 } 704 705 if (!obj->map_list.map) { 706 ret = drm_gem_create_mmap_offset(obj); 707 if (ret) 708 goto out; 709 } 710 711 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; 712 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 713 714 out: 715 drm_gem_object_unreference(obj); 716 unlock: 717 mutex_unlock(&dev->struct_mutex); 718 return ret; 719 } 720 721 int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, 722 struct drm_device *dev, 723 unsigned int handle) 724 { 725 int ret; 726 727 DRM_DEBUG_KMS("%s\n", __FILE__); 728 729 /* 730 * obj->refcount and obj->handle_count are decreased and 731 * if both them are 0 then exynos_drm_gem_free_object() 732 * would be called by callback to release resources. 733 */ 734 ret = drm_gem_handle_delete(file_priv, handle); 735 if (ret < 0) { 736 DRM_ERROR("failed to delete drm_gem_handle.\n"); 737 return ret; 738 } 739 740 return 0; 741 } 742 743 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 744 { 745 struct drm_gem_object *obj = vma->vm_private_data; 746 struct drm_device *dev = obj->dev; 747 unsigned long f_vaddr; 748 pgoff_t page_offset; 749 int ret; 750 751 page_offset = ((unsigned long)vmf->virtual_address - 752 vma->vm_start) >> PAGE_SHIFT; 753 f_vaddr = (unsigned long)vmf->virtual_address; 754 755 mutex_lock(&dev->struct_mutex); 756 757 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 758 if (ret < 0) 759 DRM_ERROR("failed to map pages.\n"); 760 761 mutex_unlock(&dev->struct_mutex); 762 763 return convert_to_vm_err_msg(ret); 764 } 765 766 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 767 { 768 struct exynos_drm_gem_obj *exynos_gem_obj; 769 struct drm_gem_object *obj; 770 int ret; 771 772 DRM_DEBUG_KMS("%s\n", __FILE__); 773 774 /* set vm_area_struct. */ 775 ret = drm_gem_mmap(filp, vma); 776 if (ret < 0) { 777 DRM_ERROR("failed to mmap.\n"); 778 return ret; 779 } 780 781 obj = vma->vm_private_data; 782 exynos_gem_obj = to_exynos_gem_obj(obj); 783 784 ret = check_gem_flags(exynos_gem_obj->flags); 785 if (ret) { 786 drm_gem_vm_close(vma); 787 drm_gem_free_mmap_offset(obj); 788 return ret; 789 } 790 791 vma->vm_flags &= ~VM_PFNMAP; 792 vma->vm_flags |= VM_MIXEDMAP; 793 794 update_vm_cache_attr(exynos_gem_obj, vma); 795 796 return ret; 797 } 798