// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}
static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen. See the
 * sketch below this comment for the userspace side.
 */
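/*
 * A minimal userspace sketch of the polling described above (illustrative
 * only; "dmabuf_fd" is an assumed, already exported dma-buf file
 * descriptor)::
 *
 *	#include <poll.h>
 *
 *	// Wait for the most recent write/exclusive fence to signal,
 *	// i.e. until it is safe to read the buffer contents.
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) < 0)
 *		return -1;
 *	// pfd.revents & POLLIN now indicates all writers have finished.
 */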
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track the usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf:	[in]	dmabuf buffer that will be renamed.
 * @buf:	[in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}
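/*
 * Userspace sketch of the naming ioctl handled above (illustrative only;
 * "dmabuf_fd" is an assumed dma-buf file descriptor and the name is
 * arbitrary)::
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	// Must be done before the buffer is attached to any device,
 *	// otherwise the ioctl fails with -EBUSY.
 *	if (ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "scanout-buffer") < 0)
 *		return -1;
 */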
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A sketch of the importer-side sequence follows below.
 */
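/*
 * A minimal sketch of the importer-side sequence from the DOC comment
 * above (illustrative only; "my_dev" and the elided error unwinding are
 * assumptions, not part of this file)::
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);		 // step 2
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // step 3
 *
 *	// ... program the device with sg_dma_address()/sg_dma_len() ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);				 // step 4
 *	dma_buf_put(dmabuf);
 */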
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator-specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. If required ops
 * are missing or allocating the struct dma_buf fails, a negative error
 * wrapped in an ERR_PTR is returned.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
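/*
 * A minimal exporter-side sketch of dma_buf_export()/dma_buf_fd() (a
 * hypothetical example; "my_ops", "my_priv" and "size" are assumptions
 * standing in for a real exporter's state)::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_ops;		// must provide map/unmap/release
 *	exp_info.size = size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_priv;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);	// hand the fd to userspace
 */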
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dma_buf_pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_buf_unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_buf_unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
 * dma_buf_pin - Lock down the DMA-buf
 *
 * @attach:	[in]	attachment which should be pinned
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Remove lock from DMA-buf
 *
 * @attach:	[in]	attachment which should be unpinned
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = dma_buf_pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	/* Only walk the table on success; sg_table may be an ERR_PTR here. */
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
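/*
 * Sketch of mapping through a dynamic attachment (illustrative only;
 * assumes "attach" was created with importer_ops providing move_notify,
 * in which case the caller must hold the reservation lock)::
 *
 *	struct sg_table *sgt;
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	dma_resv_unlock(attach->dmabuf->resv);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 */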
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);
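/*
 * Sketch of an importer-side move_notify callback invoked by the loop
 * above (a hypothetical example; "my_invalidate_mappings" is an assumed
 * driver helper, not part of this file)::
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// Called with the reservation lock held; tear down the
 *		// cached device mappings so the next use remaps the buffer.
 *		my_invalidate_mappings(attach->importer_priv);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.move_notify = my_move_notify,
 *	};
 */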
/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, which
 *   forward directly to the existing dma-buf device drivers' vfunc hooks.
 *   Userspace can make use of those markers through the DMA_BUF_IOCTL_SYNC
 *   ioctl. The sequence would be used like the following (see the sketch
 *   below this comment):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */
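/*
 * A minimal userspace sketch of the SYNC_START/SYNC_END cycle described in
 * the DOC comment above (illustrative only; "dmabuf_fd" and "size" are
 * assumed to come from the exporting driver)::
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/dma-buf.h>
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync); // restart on -EAGAIN/-EINTR
 *
 *	// ... read/write the mapping ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */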
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
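/*
 * Sketch of kernel-internal CPU access bracketed by the two calls above
 * (illustrative only; "dmabuf" is an assumed, already exported buffer and
 * the access itself is elided)::
 *
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	// ... CPU reads of the buffer contents, e.g. via dma_buf_vmap() ...
 *
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */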
/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;

}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly into kernel space for frequently used
 * objects. Please attempt to use kmap/kunmap before thinking about these
 * interfaces.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_map ptr;
	int ret = 0;

	dma_buf_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
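/*
 * Sketch of a vmap/vunmap cycle using the call above and dma_buf_vunmap()
 * below (illustrative only; assumes the exporter implements
 * &dma_buf_ops.vmap)::
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;	// no vmap support or vmalloc space exhausted
 *
 *	// ... access the buffer through map.vaddr (or map.vaddr_iomem) ...
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */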
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
"" : "un"); 1342 dma_fence_put(fence); 1343 } 1344 rcu_read_unlock(); 1345 1346 seq_puts(s, "\tAttached Devices:\n"); 1347 attach_count = 0; 1348 1349 list_for_each_entry(attach_obj, &buf_obj->attachments, node) { 1350 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev)); 1351 attach_count++; 1352 } 1353 dma_resv_unlock(buf_obj->resv); 1354 1355 seq_printf(s, "Total %d devices attached\n\n", 1356 attach_count); 1357 1358 count++; 1359 size += buf_obj->size; 1360 } 1361 1362 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); 1363 1364 mutex_unlock(&db_list.lock); 1365 return 0; 1366 1367 error_unlock: 1368 mutex_unlock(&db_list.lock); 1369 return ret; 1370 } 1371 1372 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug); 1373 1374 static struct dentry *dma_buf_debugfs_dir; 1375 1376 static int dma_buf_init_debugfs(void) 1377 { 1378 struct dentry *d; 1379 int err = 0; 1380 1381 d = debugfs_create_dir("dma_buf", NULL); 1382 if (IS_ERR(d)) 1383 return PTR_ERR(d); 1384 1385 dma_buf_debugfs_dir = d; 1386 1387 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir, 1388 NULL, &dma_buf_debug_fops); 1389 if (IS_ERR(d)) { 1390 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n"); 1391 debugfs_remove_recursive(dma_buf_debugfs_dir); 1392 dma_buf_debugfs_dir = NULL; 1393 err = PTR_ERR(d); 1394 } 1395 1396 return err; 1397 } 1398 1399 static void dma_buf_uninit_debugfs(void) 1400 { 1401 debugfs_remove_recursive(dma_buf_debugfs_dir); 1402 } 1403 #else 1404 static inline int dma_buf_init_debugfs(void) 1405 { 1406 return 0; 1407 } 1408 static inline void dma_buf_uninit_debugfs(void) 1409 { 1410 } 1411 #endif 1412 1413 static int __init dma_buf_init(void) 1414 { 1415 dma_buf_mnt = kern_mount(&dma_buf_fs_type); 1416 if (IS_ERR(dma_buf_mnt)) 1417 return PTR_ERR(dma_buf_mnt); 1418 1419 mutex_init(&db_list.lock); 1420 INIT_LIST_HEAD(&db_list.head); 1421 dma_buf_init_debugfs(); 1422 return 0; 1423 } 1424 subsys_initcall(dma_buf_init); 1425 1426 static void __exit dma_buf_deinit(void) 1427 { 1428 dma_buf_uninit_debugfs(); 1429 kern_unmount(dma_buf_mnt); 1430 } 1431 __exitcall(dma_buf_deinit); 1432