1 /* exynos_drm_gem.c 2 * 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 * Author: Inki Dae <inki.dae@samsung.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License as published by the 8 * Free Software Foundation; either version 2 of the License, or (at your 9 * option) any later version. 10 */ 11 12 #include <drm/drmP.h> 13 #include <drm/drm_vma_manager.h> 14 15 #include <linux/shmem_fs.h> 16 #include <linux/dma-buf.h> 17 #include <linux/pfn_t.h> 18 #include <drm/exynos_drm.h> 19 20 #include "exynos_drm_drv.h" 21 #include "exynos_drm_gem.h" 22 #include "exynos_drm_iommu.h" 23 24 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) 25 { 26 struct drm_device *dev = exynos_gem->base.dev; 27 unsigned long attr; 28 unsigned int nr_pages; 29 struct sg_table sgt; 30 int ret = -ENOMEM; 31 32 if (exynos_gem->dma_addr) { 33 DRM_DEBUG_KMS("already allocated.\n"); 34 return 0; 35 } 36 37 exynos_gem->dma_attrs = 0; 38 39 /* 40 * if EXYNOS_BO_CONTIG, fully physically contiguous memory 41 * region will be allocated else physically contiguous 42 * as possible. 43 */ 44 if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) 45 exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS; 46 47 /* 48 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping 49 * else cachable mapping. 50 */ 51 if (exynos_gem->flags & EXYNOS_BO_WC || 52 !(exynos_gem->flags & EXYNOS_BO_CACHABLE)) 53 attr = DMA_ATTR_WRITE_COMBINE; 54 else 55 attr = DMA_ATTR_NON_CONSISTENT; 56 57 exynos_gem->dma_attrs |= attr; 58 exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 59 60 nr_pages = exynos_gem->size >> PAGE_SHIFT; 61 62 exynos_gem->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); 63 if (!exynos_gem->pages) { 64 DRM_ERROR("failed to allocate pages.\n"); 65 return -ENOMEM; 66 } 67 68 exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, 69 &exynos_gem->dma_addr, GFP_KERNEL, 70 exynos_gem->dma_attrs); 71 if (!exynos_gem->cookie) { 72 DRM_ERROR("failed to allocate buffer.\n"); 73 goto err_free; 74 } 75 76 ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, 77 exynos_gem->dma_addr, exynos_gem->size, 78 exynos_gem->dma_attrs); 79 if (ret < 0) { 80 DRM_ERROR("failed to get sgtable.\n"); 81 goto err_dma_free; 82 } 83 84 if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL, 85 nr_pages)) { 86 DRM_ERROR("invalid sgtable.\n"); 87 ret = -EINVAL; 88 goto err_sgt_free; 89 } 90 91 sg_free_table(&sgt); 92 93 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 94 (unsigned long)exynos_gem->dma_addr, exynos_gem->size); 95 96 return 0; 97 98 err_sgt_free: 99 sg_free_table(&sgt); 100 err_dma_free: 101 dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 102 exynos_gem->dma_addr, exynos_gem->dma_attrs); 103 err_free: 104 drm_free_large(exynos_gem->pages); 105 106 return ret; 107 } 108 109 static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem) 110 { 111 struct drm_device *dev = exynos_gem->base.dev; 112 113 if (!exynos_gem->dma_addr) { 114 DRM_DEBUG_KMS("dma_addr is invalid.\n"); 115 return; 116 } 117 118 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", 119 (unsigned long)exynos_gem->dma_addr, exynos_gem->size); 120 121 dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 122 (dma_addr_t)exynos_gem->dma_addr, 123 exynos_gem->dma_attrs); 124 125 drm_free_large(exynos_gem->pages); 126 } 127 128 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 129 struct drm_file *file_priv, 130 unsigned int *handle) 131 { 132 int ret; 133 134 /* 135 * allocate a id of idr table where the obj is registered 136 * and handle has the id what user can see. 137 */ 138 ret = drm_gem_handle_create(file_priv, obj, handle); 139 if (ret) 140 return ret; 141 142 DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle); 143 144 /* drop reference from allocate - handle holds it now. */ 145 drm_gem_object_unreference_unlocked(obj); 146 147 return 0; 148 } 149 150 void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem) 151 { 152 struct drm_gem_object *obj = &exynos_gem->base; 153 154 DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count); 155 156 /* 157 * do not release memory region from exporter. 158 * 159 * the region will be released by exporter 160 * once dmabuf's refcount becomes 0. 161 */ 162 if (obj->import_attach) 163 drm_prime_gem_destroy(obj, exynos_gem->sgt); 164 else 165 exynos_drm_free_buf(exynos_gem); 166 167 /* release file pointer to gem object. */ 168 drm_gem_object_release(obj); 169 170 kfree(exynos_gem); 171 } 172 173 unsigned long exynos_drm_gem_get_size(struct drm_device *dev, 174 unsigned int gem_handle, 175 struct drm_file *file_priv) 176 { 177 struct exynos_drm_gem *exynos_gem; 178 struct drm_gem_object *obj; 179 180 obj = drm_gem_object_lookup(file_priv, gem_handle); 181 if (!obj) { 182 DRM_ERROR("failed to lookup gem object.\n"); 183 return 0; 184 } 185 186 exynos_gem = to_exynos_gem(obj); 187 188 drm_gem_object_unreference_unlocked(obj); 189 190 return exynos_gem->size; 191 } 192 193 static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, 194 unsigned long size) 195 { 196 struct exynos_drm_gem *exynos_gem; 197 struct drm_gem_object *obj; 198 int ret; 199 200 exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL); 201 if (!exynos_gem) 202 return ERR_PTR(-ENOMEM); 203 204 exynos_gem->size = size; 205 obj = &exynos_gem->base; 206 207 ret = drm_gem_object_init(dev, obj, size); 208 if (ret < 0) { 209 DRM_ERROR("failed to initialize gem object\n"); 210 kfree(exynos_gem); 211 return ERR_PTR(ret); 212 } 213 214 ret = drm_gem_create_mmap_offset(obj); 215 if (ret < 0) { 216 drm_gem_object_release(obj); 217 kfree(exynos_gem); 218 return ERR_PTR(ret); 219 } 220 221 DRM_DEBUG_KMS("created file object = %p\n", obj->filp); 222 223 return exynos_gem; 224 } 225 226 struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev, 227 unsigned int flags, 228 unsigned long size) 229 { 230 struct exynos_drm_gem *exynos_gem; 231 int ret; 232 233 if (flags & ~(EXYNOS_BO_MASK)) { 234 DRM_ERROR("invalid flags.\n"); 235 return ERR_PTR(-EINVAL); 236 } 237 238 if (!size) { 239 DRM_ERROR("invalid size.\n"); 240 return ERR_PTR(-EINVAL); 241 } 242 243 size = roundup(size, PAGE_SIZE); 244 245 exynos_gem = exynos_drm_gem_init(dev, size); 246 if (IS_ERR(exynos_gem)) 247 return exynos_gem; 248 249 /* set memory type and cache attribute from user side. */ 250 exynos_gem->flags = flags; 251 252 ret = exynos_drm_alloc_buf(exynos_gem); 253 if (ret < 0) { 254 drm_gem_object_release(&exynos_gem->base); 255 kfree(exynos_gem); 256 return ERR_PTR(ret); 257 } 258 259 return exynos_gem; 260 } 261 262 int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 263 struct drm_file *file_priv) 264 { 265 struct drm_exynos_gem_create *args = data; 266 struct exynos_drm_gem *exynos_gem; 267 int ret; 268 269 exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size); 270 if (IS_ERR(exynos_gem)) 271 return PTR_ERR(exynos_gem); 272 273 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv, 274 &args->handle); 275 if (ret) { 276 exynos_drm_gem_destroy(exynos_gem); 277 return ret; 278 } 279 280 return 0; 281 } 282 283 int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data, 284 struct drm_file *file_priv) 285 { 286 struct drm_exynos_gem_map *args = data; 287 288 return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle, 289 &args->offset); 290 } 291 292 dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 293 unsigned int gem_handle, 294 struct drm_file *filp) 295 { 296 struct exynos_drm_gem *exynos_gem; 297 struct drm_gem_object *obj; 298 299 obj = drm_gem_object_lookup(filp, gem_handle); 300 if (!obj) { 301 DRM_ERROR("failed to lookup gem object.\n"); 302 return ERR_PTR(-EINVAL); 303 } 304 305 exynos_gem = to_exynos_gem(obj); 306 307 return &exynos_gem->dma_addr; 308 } 309 310 void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 311 unsigned int gem_handle, 312 struct drm_file *filp) 313 { 314 struct drm_gem_object *obj; 315 316 obj = drm_gem_object_lookup(filp, gem_handle); 317 if (!obj) { 318 DRM_ERROR("failed to lookup gem object.\n"); 319 return; 320 } 321 322 drm_gem_object_unreference_unlocked(obj); 323 324 /* 325 * decrease obj->refcount one more time because we has already 326 * increased it at exynos_drm_gem_get_dma_addr(). 327 */ 328 drm_gem_object_unreference_unlocked(obj); 329 } 330 331 static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem, 332 struct vm_area_struct *vma) 333 { 334 struct drm_device *drm_dev = exynos_gem->base.dev; 335 unsigned long vm_size; 336 int ret; 337 338 vma->vm_flags &= ~VM_PFNMAP; 339 vma->vm_pgoff = 0; 340 341 vm_size = vma->vm_end - vma->vm_start; 342 343 /* check if user-requested size is valid. */ 344 if (vm_size > exynos_gem->size) 345 return -EINVAL; 346 347 ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, 348 exynos_gem->dma_addr, exynos_gem->size, 349 exynos_gem->dma_attrs); 350 if (ret < 0) { 351 DRM_ERROR("failed to mmap.\n"); 352 return ret; 353 } 354 355 return 0; 356 } 357 358 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 359 struct drm_file *file_priv) 360 { 361 struct exynos_drm_gem *exynos_gem; 362 struct drm_exynos_gem_info *args = data; 363 struct drm_gem_object *obj; 364 365 obj = drm_gem_object_lookup(file_priv, args->handle); 366 if (!obj) { 367 DRM_ERROR("failed to lookup gem object.\n"); 368 return -EINVAL; 369 } 370 371 exynos_gem = to_exynos_gem(obj); 372 373 args->flags = exynos_gem->flags; 374 args->size = exynos_gem->size; 375 376 drm_gem_object_unreference_unlocked(obj); 377 378 return 0; 379 } 380 381 void exynos_drm_gem_free_object(struct drm_gem_object *obj) 382 { 383 exynos_drm_gem_destroy(to_exynos_gem(obj)); 384 } 385 386 int exynos_drm_gem_dumb_create(struct drm_file *file_priv, 387 struct drm_device *dev, 388 struct drm_mode_create_dumb *args) 389 { 390 struct exynos_drm_gem *exynos_gem; 391 unsigned int flags; 392 int ret; 393 394 /* 395 * allocate memory to be used for framebuffer. 396 * - this callback would be called by user application 397 * with DRM_IOCTL_MODE_CREATE_DUMB command. 398 */ 399 400 args->pitch = args->width * ((args->bpp + 7) / 8); 401 args->size = args->pitch * args->height; 402 403 if (is_drm_iommu_supported(dev)) 404 flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; 405 else 406 flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; 407 408 exynos_gem = exynos_drm_gem_create(dev, flags, args->size); 409 if (IS_ERR(exynos_gem)) { 410 dev_warn(dev->dev, "FB allocation failed.\n"); 411 return PTR_ERR(exynos_gem); 412 } 413 414 ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv, 415 &args->handle); 416 if (ret) { 417 exynos_drm_gem_destroy(exynos_gem); 418 return ret; 419 } 420 421 return 0; 422 } 423 424 int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, 425 struct drm_device *dev, uint32_t handle, 426 uint64_t *offset) 427 { 428 struct drm_gem_object *obj; 429 int ret = 0; 430 431 /* 432 * get offset of memory allocated for drm framebuffer. 433 * - this callback would be called by user application 434 * with DRM_IOCTL_MODE_MAP_DUMB command. 435 */ 436 437 obj = drm_gem_object_lookup(file_priv, handle); 438 if (!obj) { 439 DRM_ERROR("failed to lookup gem object.\n"); 440 return -EINVAL; 441 } 442 443 *offset = drm_vma_node_offset_addr(&obj->vma_node); 444 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 445 446 drm_gem_object_unreference_unlocked(obj); 447 return ret; 448 } 449 450 int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 451 { 452 struct drm_gem_object *obj = vma->vm_private_data; 453 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 454 unsigned long pfn; 455 pgoff_t page_offset; 456 int ret; 457 458 page_offset = ((unsigned long)vmf->virtual_address - 459 vma->vm_start) >> PAGE_SHIFT; 460 461 if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { 462 DRM_ERROR("invalid page offset\n"); 463 ret = -EINVAL; 464 goto out; 465 } 466 467 pfn = page_to_pfn(exynos_gem->pages[page_offset]); 468 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, 469 __pfn_to_pfn_t(pfn, PFN_DEV)); 470 471 out: 472 switch (ret) { 473 case 0: 474 case -ERESTARTSYS: 475 case -EINTR: 476 return VM_FAULT_NOPAGE; 477 case -ENOMEM: 478 return VM_FAULT_OOM; 479 default: 480 return VM_FAULT_SIGBUS; 481 } 482 } 483 484 static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj, 485 struct vm_area_struct *vma) 486 { 487 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 488 int ret; 489 490 DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags); 491 492 /* non-cachable as default. */ 493 if (exynos_gem->flags & EXYNOS_BO_CACHABLE) 494 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 495 else if (exynos_gem->flags & EXYNOS_BO_WC) 496 vma->vm_page_prot = 497 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); 498 else 499 vma->vm_page_prot = 500 pgprot_noncached(vm_get_page_prot(vma->vm_flags)); 501 502 ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma); 503 if (ret) 504 goto err_close_vm; 505 506 return ret; 507 508 err_close_vm: 509 drm_gem_vm_close(vma); 510 511 return ret; 512 } 513 514 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) 515 { 516 struct drm_gem_object *obj; 517 int ret; 518 519 /* set vm_area_struct. */ 520 ret = drm_gem_mmap(filp, vma); 521 if (ret < 0) { 522 DRM_ERROR("failed to mmap.\n"); 523 return ret; 524 } 525 526 obj = vma->vm_private_data; 527 528 if (obj->import_attach) 529 return dma_buf_mmap(obj->dma_buf, vma, 0); 530 531 return exynos_drm_gem_mmap_obj(obj, vma); 532 } 533 534 /* low-level interface prime helpers */ 535 struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj) 536 { 537 struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); 538 int npages; 539 540 npages = exynos_gem->size >> PAGE_SHIFT; 541 542 return drm_prime_pages_to_sg(exynos_gem->pages, npages); 543 } 544 545 struct drm_gem_object * 546 exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, 547 struct dma_buf_attachment *attach, 548 struct sg_table *sgt) 549 { 550 struct exynos_drm_gem *exynos_gem; 551 int npages; 552 int ret; 553 554 exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size); 555 if (IS_ERR(exynos_gem)) { 556 ret = PTR_ERR(exynos_gem); 557 return ERR_PTR(ret); 558 } 559 560 exynos_gem->dma_addr = sg_dma_address(sgt->sgl); 561 562 npages = exynos_gem->size >> PAGE_SHIFT; 563 exynos_gem->pages = drm_malloc_ab(npages, sizeof(struct page *)); 564 if (!exynos_gem->pages) { 565 ret = -ENOMEM; 566 goto err; 567 } 568 569 ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL, 570 npages); 571 if (ret < 0) 572 goto err_free_large; 573 574 exynos_gem->sgt = sgt; 575 576 if (sgt->nents == 1) { 577 /* always physically continuous memory if sgt->nents is 1. */ 578 exynos_gem->flags |= EXYNOS_BO_CONTIG; 579 } else { 580 /* 581 * this case could be CONTIG or NONCONTIG type but for now 582 * sets NONCONTIG. 583 * TODO. we have to find a way that exporter can notify 584 * the type of its own buffer to importer. 585 */ 586 exynos_gem->flags |= EXYNOS_BO_NONCONTIG; 587 } 588 589 return &exynos_gem->base; 590 591 err_free_large: 592 drm_free_large(exynos_gem->pages); 593 err: 594 drm_gem_object_release(&exynos_gem->base); 595 kfree(exynos_gem); 596 return ERR_PTR(ret); 597 } 598 599 void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj) 600 { 601 return NULL; 602 } 603 604 void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 605 { 606 /* Nothing to do */ 607 } 608 609 int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj, 610 struct vm_area_struct *vma) 611 { 612 int ret; 613 614 ret = drm_gem_mmap_obj(obj, obj->size, vma); 615 if (ret < 0) 616 return ret; 617 618 return exynos_drm_gem_mmap_obj(obj, vma); 619 } 620