// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */
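/*
 * A minimal userspace sketch of the above (hypothetical snippet, not part of
 * this file; "fd" is assumed to be a dma-buf file descriptor):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// blocks until the most recent write or
 *				// exclusive fence has signaled
 *
 * Polling for POLLOUT instead blocks until all attached fences, shared and
 * exclusive ones, have signaled.
 */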
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, write, fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}
/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf:	[in]	dmabuf buffer that will be renamed.
 * @buf:	[in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success, or a negative errno if copying the name from
 * userspace fails.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}
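/*
 * Userspace can label a buffer through the SET_NAME ioctl, and the name then
 * shows up in fdinfo and debugfs. A hedged userspace sketch (DMA_BUF_SET_NAME
 * is the uapi wrapper that resolves to one of the _A/_B variants handled
 * above):
 *
 *	ioctl(fd, DMA_BUF_SET_NAME, "my-texture");
 */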
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */
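/*
 * An importer-side sketch of steps 2-4 above (hypothetical driver code;
 * "my_dev" and all error handling are elided/assumed):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device to DMA to/from sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */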
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On failure,
 * either because required ops are missing or because allocating the struct
 * dma_buf failed, returns a negative error wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	ret = dma_buf_stats_setup(dmabuf);
	if (ret)
		goto err_sysfs;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_sysfs:
	/*
	 * Set file->f_path.dentry->d_fsdata to NULL so that when
	 * dma_buf_release() gets invoked by dentry_ops, it exits
	 * early before calling the release() dma_buf op.
	 */
	file->f_path.dentry->d_fsdata = NULL;
	fput(file);
err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
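/*
 * An exporter-side sketch tying the two calls above together (hypothetical
 * "my_buf" object and "my_dmabuf_ops"; error handling elided):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	// hand fd to userspace
 */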
/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
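/*
 * dma_buf_get() takes a file reference that must be balanced by
 * dma_buf_put() (sketch; error handling beyond the lookup elided):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	// ... use dmabuf ...
 *	dma_buf_put(dmabuf);
 */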
static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter. */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif
}

static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	if (!IS_ERR_OR_NULL(sg_table))
		mangle_sg_table(sg_table);

	return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
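/*
 * A dynamic-importer sketch (hypothetical "my_move_notify" callback; a
 * dynamic importer must be able to re-create its mapping when notified that
 * the exporter moved the backing storage):
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev,
 *					&my_importer_ops, my_priv);
 */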
static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dmabuf->ops->unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
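/*
 * Pin/unpin sketch for a dynamic importer that needs the backing storage to
 * stay put (e.g. scanout). The reservation lock must be held, matching the
 * dma_resv_assert_held() checks above (error handling elided):
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	// ... the buffer cannot be moved until ...
 *	dma_buf_unpin(attach);
 *	dma_resv_unlock(dmabuf->resv);
 */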
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
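/*
 * For a dynamic attachment the reservation lock must be held across
 * map/unmap, per the dma_resv_assert_held() checks above (sketch; error
 * handling elided):
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	// ... DMA ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *	dma_resv_unlock(dmabuf->resv);
 */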
/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct dma_buf_map \*map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like following (see also the sketch after this
 *   list):
 *
 *   - mmap dma-buf fd
 *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *     want (with the new data being consumed by say the GPU or the scanout
 *     device)
 *   - munmap once you don't need the buffer any more
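 *
 *   A minimal userspace sketch of that cycle (hypothetical snippet; error
 *   handling and the -EAGAIN/-EINTR restart loop are elided)::
 *
 *      struct dma_buf_sync sync = { 0 };
 *      char \*ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       fd, 0);
 *
 *      sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *      ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *      // ... read/write through ptr ...
 *      sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *      ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);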
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *                       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
 */
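/*
 * A kernel-side CPU access sketch following the rules above (hypothetical
 * caller; error handling for the end call elided):
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... CPU reads, e.g. through a dma_buf_vmap() mapping ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */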
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
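/*
 * An importing driver can forward its own mmap file operation to the dma-buf
 * (hypothetical "my_obj" driver structure; a sketch, not a template):
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */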
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_map ptr;
	int ret = 0;

	dma_buf_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
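/*
 * A vmap usage sketch (assumes the exporter implements &dma_buf_ops.vmap;
 * bracket the access with the cpu-access calls as documented above):
 *
 *	struct dma_buf_map map;
 *	int ret = dma_buf_vmap(dmabuf, &map);
 *
 *	if (ret)
 *		return ret;
 *	// ... access through map.vaddr (or map.vaddr_iomem) ...
 *	dma_buf_vunmap(dmabuf, &map);
 */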
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);