/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"

static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;

	/*
	 * If EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region is allocated; otherwise the region is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * If EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
					   GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_ERROR("failed to allocate pages.\n");
		return -ENOMEM;
	}

	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_ERROR("failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_ERROR("invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
		      (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}

static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
		      (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}
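
/*
 * Illustrative sketch, not part of this driver: the user-space side of
 * the allocation path above. A client chooses the memory type and cache
 * attribute flags that exynos_drm_alloc_buf() later translates into DMA
 * attributes. The device node path and the use_handle() consumer are
 * assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/exynos_drm.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4 << 20,	// rounded up to PAGE_SIZE by the driver
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (fd >= 0 && ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);	// req.handle names the new buffer
 */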

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * Allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that user space sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * Do not release the memory region of an imported buffer;
	 * the exporter will release it once the dmabuf's refcount
	 * becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
				      unsigned int gem_handle,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem = to_exynos_gem(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem->size;
}

static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);

	return exynos_gem;
}

struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_ERROR("invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * When no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop the EXYNOS_BO_NONCONTIG flag.
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute as requested by user space. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}
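
/*
 * Illustrative sketch, not part of this driver: mapping a buffer from
 * user space. DRM_IOCTL_EXYNOS_GEM_MAP (handled by
 * exynos_drm_gem_map_ioctl() above) returns a fake mmap offset, which
 * the client then passes to mmap() on the same DRM fd. The fd, handle
 * and size values are assumptions for the example only.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/exynos_drm.h>
 *
 *	struct drm_exynos_gem_map map = { .handle = handle };
 *
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_MAP, &map) == 0) {
 *		void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, map.offset);
 *		// ptr is now backed by the GEM buffer's pages
 *	}
 */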

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = to_exynos_gem(obj);

	return &exynos_gem->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
				 unsigned int gem_handle,
				 struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * Decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
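
/*
 * Illustrative sketch, not part of this file: a driver-internal caller
 * (the G2D path is one such user) resolving a userspace handle to a
 * device address and releasing it again. The drm_dev, handle and filp
 * values are assumptions for the example only.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	// program the hardware with *addr, then drop the references
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, filp);
 */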

static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * Allocate memory to be used for the framebuffer.
	 * This callback is invoked when user space issues the
	 * DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
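
/*
 * Illustrative sketch, not part of this driver: creating and mapping a
 * dumb buffer from user space, which exercises
 * exynos_drm_gem_dumb_create() above. The fd value and the omitted
 * error handling are assumptions for the example only.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/drm.h>
 *	#include <drm/drm_mode.h>
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb mreq = { 0 };
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	mreq.handle = creq.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	void *fb = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, mreq.offset);
 */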

static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);

	/* non-cachable mapping as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}

struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* the memory is always physically contiguous if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be either CONTIG or NONCONTIG, but assume
		 * NONCONTIG for now.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its own buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}

void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	/*
	 * Buffers are allocated with DMA_ATTR_NO_KERNEL_MAPPING, so no
	 * kernel virtual mapping is available here.
	 */
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}
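
/*
 * Illustrative sketch, not part of this driver: exporting a GEM handle
 * as a dma-buf fd from user space, which lands in the prime helpers
 * above (exynos_drm_gem_prime_get_sg_table() on the export side). The
 * fd and handle values and the share_dmabuf() consumer are assumptions
 * for the example only.
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/drm.h>
 *
 *	struct drm_prime_handle prime = {
 *		.handle = handle,
 *		.flags  = DRM_CLOEXEC,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime) == 0)
 *		share_dmabuf(prime.fd);	// fd usable by another device/process
 */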