// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
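 *
 * As a rough userspace sketch (dmabuf_fd is assumed to be an already exported
 * dma-buf file descriptor; error handling is omitted), waiting for all
 * attached fences, i.e. both readers and writers, could look like::
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,
 *		.events = POLLOUT,
 *	};
 *
 *	poll(&pfd, 1, -1);
 *
 * Using .events = POLLIN instead would only wait on the most recent write or
 * exclusive fence, as described above.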
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * It can also be used to change the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf:	[in]	dmabuf buffer that will be renamed.
 * @buf:	[in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success.
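 *
 * A minimal userspace sketch (illustrative only; fd is assumed to be a dma-buf
 * file descriptor and error checking is omitted)::
 *
 *	ioctl(fd, DMA_BUF_SET_NAME, "scanout-front");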
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release = dma_buf_file_release,
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.show_fdinfo = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with the unique and dmabuffs specific
	 * value.
	 */
	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to be shared with: First the file descriptor is converted to a &dma_buf
 *    using dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or an error in allocating struct dma_buf, a negative error is
 * returned, wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
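 *
 * A minimal exporter-side sketch (my_buffer and my_dmabuf_ops are illustrative
 * placeholders for the exporter's own object and &dma_buf_ops, not part of
 * this API)::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);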
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	ret = dma_buf_stats_setup(dmabuf);
	if (ret)
		goto err_sysfs;

	return dmabuf;

err_sysfs:
	/*
	 * Set file->f_path.dentry->d_fsdata to NULL so that when
	 * dma_buf_release() gets invoked by dentry_ops, it exits
	 * early before calling the release() dma_buf op.
	 */
	file->f_path.dentry->d_fsdata = NULL;
	fput(file);
err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
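 *
 * A rough importer-side sketch (the surrounding driver code and error paths
 * are illustrative only)::
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	... attach, map and use the buffer ...
 *
 *	dma_buf_put(dmabuf);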
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter. */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
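 *
 * A condensed dynamic-importer sketch (my_importer_ops, my_move_notify and
 * my_priv are hypothetical names used only for illustration)::
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, my_priv);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);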
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dmabuf->ops->unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
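 *
 * A short static-importer sketch (attach is assumed to come from
 * dma_buf_attach(); error handling is trimmed to the essentials)::
 *
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	... program the device with the DMA addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);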
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
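 *
 * A hypothetical exporter eviction path could use it roughly like this (the
 * eviction helper shown is a placeholder, not part of this API)::
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	dma_buf_move_notify(dmabuf);
 *	my_evict_backing_storage(dmabuf->priv);
 *	dma_resv_unlock(dmabuf->resv);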
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *	void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
 *	void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers' vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *	int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *			 unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
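 *
 * A brief in-kernel usage sketch (error handling reduced to the essentials)::
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	... access the buffer through map.vaddr or map.vaddr_iomem ...
 *
 *	dma_buf_vunmap(dmabuf, &map);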
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret = 0;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);