/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>

/*
 * DMA-BUF/GEM Object references and lifetime overview:
 *
 * On the export the dma_buf holds a reference to the exporting GEM
 * object. It takes this reference in handle_to_fd_ioctl, when it
 * first calls .prime_export and stores the exporting GEM object in
 * the dma_buf priv. This reference is released when the dma_buf
 * object goes away in the driver .release function.
 *
 * On the import the importing GEM object holds a reference to the
 * dma_buf (which in turn holds a ref to the exporting GEM object).
 * It takes that reference in the fd_to_handle ioctl.
 * It calls dma_buf_get, creates an attachment to it and stores the
 * attachment in the GEM object. When the imported object is destroyed,
 * we remove the attachment and drop the reference to the dma_buf.
 *
 * Thus the chain of references always flows in one direction
 * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink
 * then it will get a fd->handle request for a GEM object that it created.
 * Drivers should detect this situation and return back the gem object
 * from the dma-buf private. Prime will do this automatically for drivers that
 * use the drm_gem_prime_{import,export} helpers.
 */
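
/*
 * For illustration: a driver that rolls its own import path instead of using
 * drm_gem_prime_import() below would do the self-import check roughly like
 * this (a sketch only; "mydrv_dmabuf_ops" is a hypothetical name for the
 * driver's dma_buf_ops, not something defined in this file):
 *
 *	if (dma_buf->ops == &mydrv_dmabuf_ops) {
 *		struct drm_gem_object *obj = dma_buf->priv;
 *
 *		if (obj->dev == dev) {
 *			drm_gem_object_reference(obj);
 *			return obj;
 *		}
 *	}
 */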

struct drm_prime_member {
	struct list_head entry;
	struct dma_buf *dma_buf;
	uint32_t handle;
};

struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;
	list_add(&member->entry, &prime_fpriv->head);
	return 0;
}

static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->handle == handle)
			return member->dma_buf;
	}

	return NULL;
}

static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct drm_prime_member *member;

	list_for_each_entry(member, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		}
	}
	return -ENOENT;
}

static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct drm_prime_member *member, *safe;

	list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
		if (member->dma_buf == dma_buf) {
			dma_buf_put(dma_buf);
			list_del(&member->entry);
			kfree(member);
		}
	}
}
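
/*
 * For illustration: &gem_prime_pin is the hook drm_gem_map_attach() defers to
 * above. A minimal sketch, assuming a shmem-backed driver that only needs to
 * grab its backing pages so they cannot move while the buffer is shared (the
 * "mydrv" names are hypothetical, not part of this file):
 *
 *	static int mydrv_gem_prime_pin(struct drm_gem_object *obj)
 *	{
 *		struct mydrv_bo *bo = to_mydrv_bo(obj);
 *
 *		return mydrv_bo_get_pages(bo);
 *	}
 */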

static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * two mappings with different directions for the same attachment are
	 * not allowed
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here */
}

void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference on the export fd holds */
	drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.kmap = drm_gem_dmabuf_kmap,
	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
	.kunmap = drm_gem_dmabuf_kunmap,
	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
 * simpler APIs by using the helper functions @drm_gem_prime_export and
 * @drm_gem_prime_import. These functions implement dma-buf support in terms
 * of five lower-level driver callbacks:
 *
 * Export callbacks:
 *
 *  - @gem_prime_pin (optional): prepare a GEM object for exporting
 *
 *  - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *
 *  - @gem_prime_vmap: vmap a buffer exported by your driver
 *
 *  - @gem_prime_vunmap: vunmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  - @gem_prime_import_sg_table (import): produce a GEM object from another
 *    driver's scatter/gather table
 */

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}
EXPORT_SYMBOL(drm_gem_prime_export);

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);
	/* Grab a new ref since the caller's ref is now owned by the dma-buf */
	drm_gem_object_reference(obj);

	return dmabuf;
}

int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags, int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_unreference_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import);
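
/*
 * For illustration: the &gem_prime_import_sg_table callback invoked above
 * wraps the exporter's scatterlist in a driver GEM object. A minimal sketch,
 * assuming a driver with a "mydrv_bo" object type (all "mydrv" names are
 * hypothetical):
 *
 *	static struct drm_gem_object *
 *	mydrv_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
 *					struct sg_table *sgt)
 *	{
 *		struct mydrv_bo *bo;
 *
 *		bo = mydrv_bo_create(dev, size);
 *		if (IS_ERR(bo))
 *			return ERR_CAST(bo);
 *
 *		bo->sgt = sgt;	(the attachment was already mapped for us)
 *		return &bo->base;
 *	}
 */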

int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	if (ret)
		goto fail;

	mutex_unlock(&file_priv->prime.lock);

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	dma_buf_put(dma_buf);
	mutex_unlock(&file_priv->prime.lock);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;
	uint32_t flags;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~DRM_CLOEXEC)
		return -EINVAL;

	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
	flags = args->flags & DRM_CLOEXEC;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
					       args->handle, flags, &args->fd);
}

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
					       args->fd, &args->handle);
}
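
/*
 * Userspace view of the two ioctls above, for illustration (a sketch with
 * error handling omitted): a GEM handle is exported to a dma-buf fd, and a
 * dma-buf fd is imported back to a handle.
 *
 *	struct drm_prime_handle args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	dmabuf_fd = args.fd;
 *
 * and, on the importing side:
 *
 *	struct drm_prime_handle args = { .fd = dmabuf_fd };
 *
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
 *	handle = args.handle;
 */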

/*
 * drm_prime_pages_to_sg
 *
 * This helper creates an sg table object from a set of pages.
 * The driver is responsible for mapping the pages into the
 * importer's address space.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);

/*
 * Export an sg table into an array of pages and addresses.
 * This is currently required by the TTM driver in order to do correct fault
 * handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);

/* helper function to cleanup a GEM/prime object */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);

void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	INIT_LIST_HEAD(&prime_fpriv->head);
	mutex_init(&prime_fpriv->lock);
}
EXPORT_SYMBOL(drm_prime_init_file_private);

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
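
/*
 * For illustration, two typical uses of the helpers above in a driver (a
 * sketch under the assumption of a page-array-backed "mydrv" driver; all
 * "mydrv" names are hypothetical):
 *
 * A driver can implement &gem_prime_get_sg_table directly on top of
 * drm_prime_pages_to_sg():
 *
 *	static struct sg_table *
 *	mydrv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct mydrv_bo *bo = to_mydrv_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages,
 *					     obj->size >> PAGE_SHIFT);
 *	}
 *
 * and its free-object path hands imported objects to drm_prime_gem_destroy()
 * so the attachment is unmapped and the dma-buf reference dropped:
 *
 *	static void mydrv_gem_free_object(struct drm_gem_object *obj)
 *	{
 *		struct mydrv_bo *bo = to_mydrv_bo(obj);
 *
 *		if (obj->import_attach)
 *			drm_prime_gem_destroy(obj, bo->sgt);
 *		mydrv_bo_free(bo);
 *	}
 */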