// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;
	if (dmabuf) {
		mutex_lock(&db_list.lock);
		list_del(&dmabuf->list_node);
		mutex_unlock(&db_list.lock);
	}

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 *   most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl.
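 *
 * A minimal userspace sketch (dmabuf_fd is a hypothetical dma-buf file
 * descriptor, error handling elided) that blocks until all device access,
 * reads and writes, has finished::
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLOUT };
 *
 *	// POLLOUT waits for all fences, POLLIN only for the write fences
 *	poll(&pfd, 1, -1);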
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
 * The name can be changed at any time, which allows tracking the same
 * piece of memory as it is used for different purposes by different
 * devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success, or a negative error code if copying the name from
 * userspace fails.
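 *
 * A hypothetical userspace sketch of the corresponding ioctl (this handler
 * backs DMA_BUF_SET_NAME from <uapi/linux/dma-buf.h>)::
 *
 *	ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "my-buffer");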
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique and thus
	 * not suitable for use as the dentry name by dma-buf stats.
	 * Override ->i_ino with a unique, dmabuffs-specific value.
	 */
	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. The sketch below condenses steps 2-4.
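 *
 * A condensed importer-side sketch (my_dev is a hypothetical &struct device
 * pointer, locking and error handling elided)::
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, my_dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	// ... device DMA to/from the pages described by sgt ...
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);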
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On missing ops
 * or on failure to allocate the struct dma_buf, returns a negative error
 * wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
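 *
 * A minimal exporter-side sketch (my_dma_buf_ops, my_priv and size are
 * hypothetical, error handling elided)::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_priv;
 *
 *	dmabuf = dma_buf_export(&exp_info);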
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns the associated fd. Otherwise, returns a negative
 * error code.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget() to increase the refcount. Returns
 * ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter. */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exports and importers,
 * all dma-buf API users must follow the common dma-buf locking convention.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_pin()
 * - dma_buf_unpin()
 * - dma_buf_map_attachment()
 * - dma_buf_unmap_attachment()
 * - dma_buf_vmap()
 * - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_attach()
 * - dma_buf_dynamic_attach()
 * - dma_buf_detach()
 * - dma_buf_export()
 * - dma_buf_fd()
 * - dma_buf_get()
 * - dma_buf_put()
 * - dma_buf_mmap()
 * - dma_buf_begin_cpu_access()
 * - dma_buf_end_cpu_access()
 * - dma_buf_map_attachment_unlocked()
 * - dma_buf_unmap_attachment_unlocked()
 * - dma_buf_vmap_unlocked()
 * - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
 *    reservation and exporter can take the lock:
 *
 * - &dma_buf_ops.attach()
 * - &dma_buf_ops.detach()
 * - &dma_buf_ops.release()
 * - &dma_buf_ops.begin_cpu_access()
 * - &dma_buf_ops.end_cpu_access()
 * - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
 *    reservation and exporter can't take the lock:
 *
 * - &dma_buf_ops.pin()
 * - &dma_buf_ops.unpin()
 * - &dma_buf_ops.map_dma_buf()
 * - &dma_buf_ops.unmap_dma_buf()
 * - &dma_buf_ops.vmap()
 * - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_move_notify()
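 *
 * For illustration, an importer mapping an attachment under importer rule 1
 * above might look like this (sketch only, error handling elided)::
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dmabuf->resv);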
 */

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
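 *
 * A dynamic importer sketch (my_attach_ops and my_move_notify are
 * hypothetical; move_notify must re-create the mapping when invoked)::
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, priv);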
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		dma_resv_lock(attach->dmabuf->resv, NULL);
		if (dma_buf_is_dynamic(attach->dmabuf)) {
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);

	if (attach->sgt) {
		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf))
			dmabuf->ops->unpin(attach);
	}
	list_del(&attach->node);

	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
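 *
 * A sketch under the locking convention above (the caller takes the
 * reservation lock, error handling elided)::
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	if (!ret) {
 *		// ... scanout from the now-pinned buffer ...
 *		dma_buf_unpin(attach);
 *	}
 *	dma_resv_unlock(attach->dmabuf->resv);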
1036c545781eSDaniel Vetter *
1037bb42df46SChristian König * Returns:
1038bb42df46SChristian König * 0 on success, negative error code on failure.
1039bb42df46SChristian König */
dma_buf_pin(struct dma_buf_attachment * attach)1040bb42df46SChristian König int dma_buf_pin(struct dma_buf_attachment *attach)
1041bb42df46SChristian König {
1042bb42df46SChristian König struct dma_buf *dmabuf = attach->dmabuf;
1043bb42df46SChristian König int ret = 0;
1044bb42df46SChristian König
1045c545781eSDaniel Vetter WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1046c545781eSDaniel Vetter
1047bb42df46SChristian König dma_resv_assert_held(dmabuf->resv);
1048bb42df46SChristian König
1049bb42df46SChristian König if (dmabuf->ops->pin)
1050bb42df46SChristian König ret = dmabuf->ops->pin(attach);
1051bb42df46SChristian König
1052bb42df46SChristian König return ret;
1053bb42df46SChristian König }
105416b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1055bb42df46SChristian König
1056bb42df46SChristian König /**
1057c545781eSDaniel Vetter * dma_buf_unpin - Unpin a DMA-buf
1058bb42df46SChristian König * @attach: [in] attachment which should be unpinned
1059c545781eSDaniel Vetter *
1060c545781eSDaniel Vetter * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1061c545781eSDaniel Vetter * any mapping of @attach again and inform the importer through
1062c545781eSDaniel Vetter * &dma_buf_attach_ops.move_notify.
1063bb42df46SChristian König */
1064bb42df46SChristian König void dma_buf_unpin(struct dma_buf_attachment *attach)
1065bb42df46SChristian König {
1066bb42df46SChristian König struct dma_buf *dmabuf = attach->dmabuf;
1067bb42df46SChristian König
1068c545781eSDaniel Vetter WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1069c545781eSDaniel Vetter
1070bb42df46SChristian König dma_resv_assert_held(dmabuf->resv);
1071bb42df46SChristian König
1072bb42df46SChristian König if (dmabuf->ops->unpin)
1073bb42df46SChristian König dmabuf->ops->unpin(attach);
1074bb42df46SChristian König }
107516b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
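
/*
 * A minimal sketch of pin/unpin usage, assuming @attach was created with
 * dma_buf_dynamic_attach() and dmabuf/ret are in scope; my_setup_scanout()
 * is hypothetical. The reservation lock must be held across both calls.
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	if (!ret) {
 *		my_setup_scanout(attach);
 *		...
 *		dma_buf_unpin(attach);
 *	}
 *	dma_resv_unlock(dmabuf->resv);
 */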
1076bb42df46SChristian König
1077bb42df46SChristian König /**
107835fac7e3SMaarten Lankhorst * dma_buf_map_attachment - Returns the scatterlist table of the attachment,
107935fac7e3SMaarten Lankhorst * mapped into _device_ address space. This is a wrapper for the map_dma_buf()
108035fac7e3SMaarten Lankhorst * operation of &dma_buf_ops.
108135fac7e3SMaarten Lankhorst * @attach: [in] attachment whose scatterlist is to be returned
108235fac7e3SMaarten Lankhorst * @direction: [in] direction of DMA transfer
108335fac7e3SMaarten Lankhorst *
108435fac7e3SMaarten Lankhorst * Returns an sg_table containing the scatterlist of the mapping, or ERR_PTR
10852904a8c1SDaniel Vetter * on error. May return -EINTR if it is interrupted by a signal.
10862904a8c1SDaniel Vetter *
1087ac80cd17SJianxin Xiong * On success, the DMA addresses and lengths in the returned scatterlist are
1088ac80cd17SJianxin Xiong * PAGE_SIZE aligned.
1089ac80cd17SJianxin Xiong *
1090c138782dSLiviu Dudau * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
10912904a8c1SDaniel Vetter * the underlying backing storage is pinned for as long as a mapping exists,
10922904a8c1SDaniel Vetter * therefore users/importers should not hold onto a mapping for undue amounts of
10932904a8c1SDaniel Vetter * time.
109489bcadc8SDaniel Vetter *
109589bcadc8SDaniel Vetter * Important: Dynamic importers must wait for the exclusive fence of the struct
109689bcadc8SDaniel Vetter * dma_resv attached to the DMA-BUF first.
109735fac7e3SMaarten Lankhorst */
109835fac7e3SMaarten Lankhorst struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
109935fac7e3SMaarten Lankhorst enum dma_data_direction direction)
110035fac7e3SMaarten Lankhorst {
1101531beb06SColin Ian King struct sg_table *sg_table;
1102bb42df46SChristian König int r;
110335fac7e3SMaarten Lankhorst
110435fac7e3SMaarten Lankhorst might_sleep();
110535fac7e3SMaarten Lankhorst
110635fac7e3SMaarten Lankhorst if (WARN_ON(!attach || !attach->dmabuf))
110735fac7e3SMaarten Lankhorst return ERR_PTR(-EINVAL);
110835fac7e3SMaarten Lankhorst
110915fd552dSChristian König dma_resv_assert_held(attach->dmabuf->resv);
111015fd552dSChristian König
1111f13e143eSChristian König if (attach->sgt) {
1112f13e143eSChristian König /*
1113f13e143eSChristian König * Two mappings with different directions for the same
1114f13e143eSChristian König * attachment are not allowed.
1115f13e143eSChristian König */
1116f13e143eSChristian König if (attach->dir != direction &&
1117f13e143eSChristian König attach->dir != DMA_BIDIRECTIONAL)
1118f13e143eSChristian König return ERR_PTR(-EBUSY);
1119f13e143eSChristian König
1120f13e143eSChristian König return attach->sgt;
1121f13e143eSChristian König }
1122f13e143eSChristian König
1123bb42df46SChristian König if (dma_buf_is_dynamic(attach->dmabuf)) {
11244981cdb0SChristian König if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
11257e008b02SChristian König r = attach->dmabuf->ops->pin(attach);
1126bb42df46SChristian König if (r)
1127bb42df46SChristian König return ERR_PTR(r);
1128bb42df46SChristian König }
1129bb42df46SChristian König }
113015fd552dSChristian König
113184335675SDaniel Vetter sg_table = __map_dma_buf(attach, direction);
113235fac7e3SMaarten Lankhorst if (!sg_table)
113335fac7e3SMaarten Lankhorst sg_table = ERR_PTR(-ENOMEM);
113435fac7e3SMaarten Lankhorst
1135bb42df46SChristian König if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
11364981cdb0SChristian König !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
11377e008b02SChristian König attach->dmabuf->ops->unpin(attach);
1138bb42df46SChristian König
1139f13e143eSChristian König if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1140f13e143eSChristian König attach->sgt = sg_table;
1141f13e143eSChristian König attach->dir = direction;
1142f13e143eSChristian König }
1143f13e143eSChristian König
1144ac80cd17SJianxin Xiong #ifdef CONFIG_DMA_API_DEBUG
114500efd65aSJianxin Xiong if (!IS_ERR(sg_table)) {
1146ac80cd17SJianxin Xiong struct scatterlist *sg;
1147ac80cd17SJianxin Xiong u64 addr;
1148ac80cd17SJianxin Xiong int len;
1149ac80cd17SJianxin Xiong int i;
1150ac80cd17SJianxin Xiong
1151ac80cd17SJianxin Xiong for_each_sgtable_dma_sg(sg_table, sg, i) {
1152ac80cd17SJianxin Xiong addr = sg_dma_address(sg);
1153ac80cd17SJianxin Xiong len = sg_dma_len(sg);
1154ac80cd17SJianxin Xiong if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1155ac80cd17SJianxin Xiong pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1156ac80cd17SJianxin Xiong __func__, addr, len);
1157ac80cd17SJianxin Xiong }
1158ac80cd17SJianxin Xiong }
1159ac80cd17SJianxin Xiong }
1160ac80cd17SJianxin Xiong #endif /* CONFIG_DMA_API_DEBUG */
116135fac7e3SMaarten Lankhorst return sg_table;
116235fac7e3SMaarten Lankhorst }
116316b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
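
/*
 * A hedged sketch of consuming the returned table: the DMA address/length
 * pairs are walked with for_each_sgtable_dma_sg(); program_device_page()
 * is a hypothetical, device-specific helper, and sgt is assumed to be a
 * valid return value of dma_buf_map_attachment().
 *
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_device_page(dev, sg_dma_address(sg), sg_dma_len(sg));
 */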
116435fac7e3SMaarten Lankhorst
116535fac7e3SMaarten Lankhorst /**
116619d6634dSDmitry Osipenko * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment,
116719d6634dSDmitry Osipenko * mapped into _device_ address space. This is a wrapper for the map_dma_buf()
116819d6634dSDmitry Osipenko * operation of &dma_buf_ops.
116919d6634dSDmitry Osipenko * @attach: [in] attachment whose scatterlist is to be returned
117019d6634dSDmitry Osipenko * @direction: [in] direction of DMA transfer
117119d6634dSDmitry Osipenko *
117219d6634dSDmitry Osipenko * Unlocked variant of dma_buf_map_attachment().
117319d6634dSDmitry Osipenko */
117419d6634dSDmitry Osipenko struct sg_table *
117519d6634dSDmitry Osipenko dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
117619d6634dSDmitry Osipenko enum dma_data_direction direction)
117719d6634dSDmitry Osipenko {
117819d6634dSDmitry Osipenko struct sg_table *sg_table;
117919d6634dSDmitry Osipenko
118019d6634dSDmitry Osipenko might_sleep();
118119d6634dSDmitry Osipenko
118219d6634dSDmitry Osipenko if (WARN_ON(!attach || !attach->dmabuf))
118319d6634dSDmitry Osipenko return ERR_PTR(-EINVAL);
118419d6634dSDmitry Osipenko
118519d6634dSDmitry Osipenko dma_resv_lock(attach->dmabuf->resv, NULL);
118619d6634dSDmitry Osipenko sg_table = dma_buf_map_attachment(attach, direction);
118719d6634dSDmitry Osipenko dma_resv_unlock(attach->dmabuf->resv);
118819d6634dSDmitry Osipenko
118919d6634dSDmitry Osipenko return sg_table;
119019d6634dSDmitry Osipenko }
119119d6634dSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
119219d6634dSDmitry Osipenko
119319d6634dSDmitry Osipenko /**
119435fac7e3SMaarten Lankhorst * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
119535fac7e3SMaarten Lankhorst * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
119635fac7e3SMaarten Lankhorst * dma_buf_ops.
119735fac7e3SMaarten Lankhorst * @attach: [in] attachment to unmap buffer from
119835fac7e3SMaarten Lankhorst * @sg_table: [in] scatterlist info of the buffer to unmap
119935fac7e3SMaarten Lankhorst * @direction: [in] direction of DMA transfer
120035fac7e3SMaarten Lankhorst *
12012904a8c1SDaniel Vetter * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
120235fac7e3SMaarten Lankhorst */
120335fac7e3SMaarten Lankhorst void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
120435fac7e3SMaarten Lankhorst struct sg_table *sg_table,
120535fac7e3SMaarten Lankhorst enum dma_data_direction direction)
120635fac7e3SMaarten Lankhorst {
120735fac7e3SMaarten Lankhorst might_sleep();
120835fac7e3SMaarten Lankhorst
120935fac7e3SMaarten Lankhorst if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
121035fac7e3SMaarten Lankhorst return;
121135fac7e3SMaarten Lankhorst
121215fd552dSChristian König dma_resv_assert_held(attach->dmabuf->resv);
121315fd552dSChristian König
1214f13e143eSChristian König if (attach->sgt == sg_table)
1215f13e143eSChristian König return;
1216f13e143eSChristian König
121784335675SDaniel Vetter __unmap_dma_buf(attach, sg_table, direction);
1218bb42df46SChristian König
1219bb42df46SChristian König if (dma_buf_is_dynamic(attach->dmabuf) &&
12204981cdb0SChristian König !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1221bb42df46SChristian König dma_buf_unpin(attach);
122235fac7e3SMaarten Lankhorst }
122316b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
122435fac7e3SMaarten Lankhorst
12250959a168SDaniel Vetter /**
122619d6634dSDmitry Osipenko * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
122719d6634dSDmitry Osipenko * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
122819d6634dSDmitry Osipenko * dma_buf_ops.
122919d6634dSDmitry Osipenko * @attach: [in] attachment to unmap buffer from
123019d6634dSDmitry Osipenko * @sg_table: [in] scatterlist info of the buffer to unmap
123119d6634dSDmitry Osipenko * @direction: [in] direction of DMA transfer
123219d6634dSDmitry Osipenko *
123319d6634dSDmitry Osipenko * Unlocked variant of dma_buf_unmap_attachment().
123419d6634dSDmitry Osipenko */
123519d6634dSDmitry Osipenko void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
123619d6634dSDmitry Osipenko struct sg_table *sg_table,
123719d6634dSDmitry Osipenko enum dma_data_direction direction)
123819d6634dSDmitry Osipenko {
123919d6634dSDmitry Osipenko might_sleep();
124019d6634dSDmitry Osipenko
124119d6634dSDmitry Osipenko if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
124219d6634dSDmitry Osipenko return;
124319d6634dSDmitry Osipenko
124419d6634dSDmitry Osipenko dma_resv_lock(attach->dmabuf->resv, NULL);
124519d6634dSDmitry Osipenko dma_buf_unmap_attachment(attach, sg_table, direction);
124619d6634dSDmitry Osipenko dma_resv_unlock(attach->dmabuf->resv);
124719d6634dSDmitry Osipenko }
124819d6634dSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
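
/*
 * A sketch of a full map/unmap round trip using the _unlocked variants, as
 * a static importer might do it; my_device_dma() is hypothetical and attach
 * is assumed to come from dma_buf_attach().
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_FROM_DEVICE);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	my_device_dma(dev, sgt);
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_FROM_DEVICE);
 */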
124919d6634dSDmitry Osipenko
125019d6634dSDmitry Osipenko /**
1251bb42df46SChristian König * dma_buf_move_notify - notify attachments that DMA-buf is moving
1252bb42df46SChristian König *
1253bb42df46SChristian König * @dmabuf: [in] buffer which is moving
1254bb42df46SChristian König *
1255b56ffa58ST.J. Mercier * Informs all attachments that they need to destroy and recreate all their
1256bb42df46SChristian König * mappings.
1257bb42df46SChristian König */
1258bb42df46SChristian König void dma_buf_move_notify(struct dma_buf *dmabuf)
1259bb42df46SChristian König {
1260bb42df46SChristian König struct dma_buf_attachment *attach;
1261bb42df46SChristian König
1262bb42df46SChristian König dma_resv_assert_held(dmabuf->resv);
1263bb42df46SChristian König
1264bb42df46SChristian König list_for_each_entry(attach, &dmabuf->attachments, node)
12654981cdb0SChristian König if (attach->importer_ops)
1266bb42df46SChristian König attach->importer_ops->move_notify(attach);
1267bb42df46SChristian König }
126816b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
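
/*
 * A hedged sketch of the exporter side: before moving the backing storage,
 * the exporter notifies all importers under the reservation lock;
 * my_evict() is hypothetical.
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	dma_buf_move_notify(dmabuf);
 *	my_evict(dmabuf->priv);
 *	dma_resv_unlock(dmabuf->resv);
 */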
1269bb42df46SChristian König
1270bb42df46SChristian König /**
12710959a168SDaniel Vetter * DOC: cpu access
12720959a168SDaniel Vetter *
1273b56ffa58ST.J. Mercier * There are multiple reasons for supporting CPU access to a dma buffer object:
12740959a168SDaniel Vetter *
12750959a168SDaniel Vetter * - Fallback operations in the kernel, for example when a device is connected
12760959a168SDaniel Vetter * over USB and the kernel needs to shuffle the data around first before
1277b56ffa58ST.J. Mercier * sending it away. Cache coherency is handled by bracketing any transactions
12780959a168SDaniel Vetter * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
12800959a168SDaniel Vetter *
12817f0de8d8SDaniel Vetter * Since most kernel-internal dma-buf accesses need the entire buffer, a
12827f0de8d8SDaniel Vetter * vmap interface is introduced. Note that on very old 32-bit architectures
12837f0de8d8SDaniel Vetter * vmalloc space might be limited and result in vmap calls failing.
12840959a168SDaniel Vetter *
12850959a168SDaniel Vetter * Interfaces::
1286de9114ecSDaniel Vetter *
12877938f421SLucas De Marchi * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
12887938f421SLucas De Marchi * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
12890959a168SDaniel Vetter *
12900959a168SDaniel Vetter * The vmap call can fail if there is no vmap support in the exporter, or if
1291de9114ecSDaniel Vetter * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1292de9114ecSDaniel Vetter * count for all vmap access and calls down into the exporter's vmap function
1293de9114ecSDaniel Vetter * only when no vmapping exists, and only unmaps it once. Protection against
1294de9114ecSDaniel Vetter * concurrent vmap/vunmap calls is provided by holding the &dma_buf.resv lock.
12950959a168SDaniel Vetter *
12960959a168SDaniel Vetter * - For full compatibility on the importer side with existing userspace
12970959a168SDaniel Vetter * interfaces, which might already support mmap'ing buffers. This is needed in
12980959a168SDaniel Vetter * many processing pipelines (e.g. feeding a software rendered image into a
12990959a168SDaniel Vetter * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
13000959a168SDaniel Vetter * framework already supported this, and mmap support was needed for DMA buffer
13010959a168SDaniel Vetter * file descriptors to replace ION buffers.
13020959a168SDaniel Vetter *
13030959a168SDaniel Vetter * There are no special interfaces; userspace simply calls mmap on the dma-buf
1304b56ffa58ST.J. Mercier * fd. But like for CPU access there's a need to bracket the actual access,
13050959a168SDaniel Vetter * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
13060959a168SDaniel Vetter * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
13070959a168SDaniel Vetter * be restarted.
13080959a168SDaniel Vetter *
13090959a168SDaniel Vetter * Some systems might need some sort of cache coherency management e.g. when
13100959a168SDaniel Vetter * CPU and GPU domains are being accessed through dma-buf at the same time.
13110959a168SDaniel Vetter * To circumvent this problem there are begin/end coherency markers that
13120959a168SDaniel Vetter * forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
13130959a168SDaniel Vetter * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
13140959a168SDaniel Vetter * sequence would be used as follows:
13150959a168SDaniel Vetter *
13160959a168SDaniel Vetter * - mmap dma-buf fd
13170959a168SDaniel Vetter * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
13180959a168SDaniel Vetter * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
13190959a168SDaniel Vetter * want (with the new data being consumed by say the GPU or the scanout
13200959a168SDaniel Vetter * device)
13210959a168SDaniel Vetter * - munmap once you don't need the buffer any more
13220959a168SDaniel Vetter *
13230959a168SDaniel Vetter * For correctness and optimal performance, it is always required to use
13240959a168SDaniel Vetter * SYNC_START and SYNC_END before and after, respectively, when accessing the
13250959a168SDaniel Vetter * mapped address. Userspace cannot rely on coherent access, even when there
13260959a168SDaniel Vetter * are systems where it just works without calling these ioctls.
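 *
 * A minimal sketch of that sequence for a userspace write (error handling
 * elided; needs <linux/dma-buf.h> and <sys/ioctl.h>; dmabuf_fd, map, src
 * and len are assumed to be set up already)::
 *
 *     struct dma_buf_sync sync = { 0 };
 *
 *     sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *     ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *     memcpy(map, src, len);
 *     sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *     ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);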
13270959a168SDaniel Vetter *
13280959a168SDaniel Vetter * - And as a CPU fallback in userspace processing pipelines.
13290959a168SDaniel Vetter *
13300959a168SDaniel Vetter * Similar to the motivation for kernel cpu access it is again important that
13310959a168SDaniel Vetter * the userspace code of a given importing subsystem can use the same
13320959a168SDaniel Vetter * interfaces with an imported dma-buf buffer object as with a native buffer
13330959a168SDaniel Vetter * object. This is especially important for drm where the userspace part of
13340959a168SDaniel Vetter * contemporary OpenGL, X, and other drivers is huge, and reworking them to
13350959a168SDaniel Vetter * use a different way to mmap a buffer would be rather invasive.
13360959a168SDaniel Vetter *
13370959a168SDaniel Vetter * The assumption in the current dma-buf interfaces is that redirecting the
13380959a168SDaniel Vetter * initial mmap is all that's needed. A survey of some of the existing
13390959a168SDaniel Vetter * subsystems shows that no driver seems to do any nefarious thing like
13400959a168SDaniel Vetter * syncing up with outstanding asynchronous processing on the device or
13410959a168SDaniel Vetter * allocating special resources at fault time. So hopefully this is good
13420959a168SDaniel Vetter * enough, since adding interfaces to intercept pagefaults and allow pte
13430959a168SDaniel Vetter * shootdowns would increase the complexity quite a bit.
13440959a168SDaniel Vetter *
13450959a168SDaniel Vetter * Interface::
134685804b70SDaniel Vetter *
13470959a168SDaniel Vetter * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
13480959a168SDaniel Vetter * unsigned long);
13490959a168SDaniel Vetter *
13500959a168SDaniel Vetter * If the importing subsystem simply provides a special-purpose mmap call to
135185804b70SDaniel Vetter * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
13520959a168SDaniel Vetter * equally achieve that for a dma-buf object.
13530959a168SDaniel Vetter */
13540959a168SDaniel Vetter
1355ae4e46b1SChris Wilson static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1356ae4e46b1SChris Wilson enum dma_data_direction direction)
1357ae4e46b1SChris Wilson {
1358ae4e46b1SChris Wilson bool write = (direction == DMA_BIDIRECTIONAL ||
1359ae4e46b1SChris Wilson direction == DMA_TO_DEVICE);
136052791eeeSChristian König struct dma_resv *resv = dmabuf->resv;
1361ae4e46b1SChris Wilson long ret;
1362ae4e46b1SChris Wilson
1363ae4e46b1SChris Wilson /* Wait on any implicit rendering fences */
13647bc80a54SChristian König ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
13657bc80a54SChristian König true, MAX_SCHEDULE_TIMEOUT);
1366ae4e46b1SChris Wilson if (ret < 0)
1367ae4e46b1SChris Wilson return ret;
1368ae4e46b1SChris Wilson
1369ae4e46b1SChris Wilson return 0;
1370ae4e46b1SChris Wilson }
137135fac7e3SMaarten Lankhorst
137235fac7e3SMaarten Lankhorst /**
137335fac7e3SMaarten Lankhorst * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
137435fac7e3SMaarten Lankhorst * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
137535fac7e3SMaarten Lankhorst * preparations. Coherency is only guaranteed in the specified range for the
137635fac7e3SMaarten Lankhorst * specified access direction.
137735fac7e3SMaarten Lankhorst * @dmabuf: [in] buffer to prepare cpu access for.
1378b56ffa58ST.J. Mercier * @direction: [in] direction of access.
137935fac7e3SMaarten Lankhorst *
13800959a168SDaniel Vetter * After the cpu access is complete the caller should call
1381b56ffa58ST.J. Mercier * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
13820959a168SDaniel Vetter * it guaranteed to be coherent with other DMA access.
13830959a168SDaniel Vetter *
1384de9114ecSDaniel Vetter * This function will also wait for any DMA transactions tracked through
1385de9114ecSDaniel Vetter * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1386de9114ecSDaniel Vetter * synchronization this function will only ensure cache coherency, callers must
1387de9114ecSDaniel Vetter * ensure synchronization with such DMA transactions on their own.
1388de9114ecSDaniel Vetter *
138935fac7e3SMaarten Lankhorst * Can return negative error values, returns 0 on success.
139035fac7e3SMaarten Lankhorst */
1391831e9da7STiago Vignatti int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
139235fac7e3SMaarten Lankhorst enum dma_data_direction direction)
139335fac7e3SMaarten Lankhorst {
139435fac7e3SMaarten Lankhorst int ret = 0;
139535fac7e3SMaarten Lankhorst
139635fac7e3SMaarten Lankhorst if (WARN_ON(!dmabuf))
139735fac7e3SMaarten Lankhorst return -EINVAL;
139835fac7e3SMaarten Lankhorst
13998ccf0a29SDaniel Vetter might_lock(&dmabuf->resv->lock.base);
14008ccf0a29SDaniel Vetter
140135fac7e3SMaarten Lankhorst if (dmabuf->ops->begin_cpu_access)
1402831e9da7STiago Vignatti ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
140335fac7e3SMaarten Lankhorst
1404ae4e46b1SChris Wilson /* Ensure that all fences are waited upon - but we first allow
1405ae4e46b1SChris Wilson * the native handler the chance to do so more efficiently if it
1406ae4e46b1SChris Wilson * chooses. A double invocation here will be a reasonably cheap no-op.
1407ae4e46b1SChris Wilson */
1408ae4e46b1SChris Wilson if (ret == 0)
1409ae4e46b1SChris Wilson ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1410ae4e46b1SChris Wilson
141135fac7e3SMaarten Lankhorst return ret;
141235fac7e3SMaarten Lankhorst }
141316b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
141435fac7e3SMaarten Lankhorst
141535fac7e3SMaarten Lankhorst /**
141635fac7e3SMaarten Lankhorst * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
141735fac7e3SMaarten Lankhorst * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
141835fac7e3SMaarten Lankhorst * actions. Coherency is only guaranteed in the specified range for the
141935fac7e3SMaarten Lankhorst * specified access direction.
142035fac7e3SMaarten Lankhorst * @dmabuf: [in] buffer to complete cpu access for.
1421b56ffa58ST.J. Mercier * @direction: [in] direction of access.
142235fac7e3SMaarten Lankhorst *
14230959a168SDaniel Vetter * This terminates CPU access started with dma_buf_begin_cpu_access().
14240959a168SDaniel Vetter *
142587e332d5SDaniel Vetter * Can return negative error values, returns 0 on success.
142635fac7e3SMaarten Lankhorst */
142718b862dcSChris Wilson int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
142835fac7e3SMaarten Lankhorst enum dma_data_direction direction)
142935fac7e3SMaarten Lankhorst {
143018b862dcSChris Wilson int ret = 0;
143118b862dcSChris Wilson
143235fac7e3SMaarten Lankhorst WARN_ON(!dmabuf);
143335fac7e3SMaarten Lankhorst
14348ccf0a29SDaniel Vetter might_lock(&dmabuf->resv->lock.base);
14358ccf0a29SDaniel Vetter
143635fac7e3SMaarten Lankhorst if (dmabuf->ops->end_cpu_access)
143718b862dcSChris Wilson ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
143818b862dcSChris Wilson
143918b862dcSChris Wilson return ret;
144035fac7e3SMaarten Lankhorst }
144116b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
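
/*
 * A hedged sketch of bracketing kernel CPU access with the two calls above;
 * vaddr is assumed to come from a prior dma_buf_vmap() and process_data()
 * is hypothetical.
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	process_data(vaddr, dmabuf->size);
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */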
144235fac7e3SMaarten Lankhorst
144335fac7e3SMaarten Lankhorst
144435fac7e3SMaarten Lankhorst /**
144535fac7e3SMaarten Lankhorst * dma_buf_mmap - Set up a userspace mmap with the given vma
144635fac7e3SMaarten Lankhorst * @dmabuf: [in] buffer that should back the vma
144735fac7e3SMaarten Lankhorst * @vma: [in] vma for the mmap
144835fac7e3SMaarten Lankhorst * @pgoff: [in] offset in pages where this mmap should start within the
144935fac7e3SMaarten Lankhorst * dma-buf buffer.
145035fac7e3SMaarten Lankhorst *
145135fac7e3SMaarten Lankhorst * This function adjusts the passed in vma so that it points at the file of the
145235fac7e3SMaarten Lankhorst * dma_buf operation. It also adjusts the starting pgoff and does bounds
145335fac7e3SMaarten Lankhorst * checking on the size of the vma. Then it calls the exporter's mmap function to
145435fac7e3SMaarten Lankhorst * set up the mapping.
145535fac7e3SMaarten Lankhorst *
145635fac7e3SMaarten Lankhorst * Can return negative error values, returns 0 on success.
145735fac7e3SMaarten Lankhorst */
145835fac7e3SMaarten Lankhorst int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
145935fac7e3SMaarten Lankhorst unsigned long pgoff)
146035fac7e3SMaarten Lankhorst {
146135fac7e3SMaarten Lankhorst if (WARN_ON(!dmabuf || !vma))
146235fac7e3SMaarten Lankhorst return -EINVAL;
146335fac7e3SMaarten Lankhorst
1464e3a9d6c5SAndrew F. Davis /* check if buffer supports mmap */
1465e3a9d6c5SAndrew F. Davis if (!dmabuf->ops->mmap)
1466e3a9d6c5SAndrew F. Davis return -EINVAL;
1467e3a9d6c5SAndrew F. Davis
146835fac7e3SMaarten Lankhorst /* check for offset overflow */
1469b02da6f8SMuhammad Falak R Wani if (pgoff + vma_pages(vma) < pgoff)
147035fac7e3SMaarten Lankhorst return -EOVERFLOW;
147135fac7e3SMaarten Lankhorst
147235fac7e3SMaarten Lankhorst /* check for overflowing the buffer's size */
1473b02da6f8SMuhammad Falak R Wani if (pgoff + vma_pages(vma) >
147435fac7e3SMaarten Lankhorst dmabuf->size >> PAGE_SHIFT)
147535fac7e3SMaarten Lankhorst return -EINVAL;
147635fac7e3SMaarten Lankhorst
147735fac7e3SMaarten Lankhorst /* readjust the vma */
1478295992fbSChristian König vma_set_file(vma, dmabuf->file);
147935fac7e3SMaarten Lankhorst vma->vm_pgoff = pgoff;
148035fac7e3SMaarten Lankhorst
1481*8021fa16SDmitry Osipenko return dmabuf->ops->mmap(dmabuf, vma);
148235fac7e3SMaarten Lankhorst }
148316b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
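
/*
 * A sketch of an importing driver redirecting a userspace mmap of an
 * imported object to the exporter through this helper; my_obj and
 * my_obj_lookup() are hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = my_obj_lookup(file, vma->vm_pgoff);
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */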
148435fac7e3SMaarten Lankhorst
148535fac7e3SMaarten Lankhorst /**
148635fac7e3SMaarten Lankhorst * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
148735fac7e3SMaarten Lankhorst * address space. Same restrictions as for vmap and friends apply.
148835fac7e3SMaarten Lankhorst * @dmabuf: [in] buffer to vmap
14896619ccf1SThomas Zimmermann * @map: [out] returns the vmap pointer
149035fac7e3SMaarten Lankhorst *
149135fac7e3SMaarten Lankhorst * This call may fail due to lack of virtual mapping address space.
149235fac7e3SMaarten Lankhorst * These calls are optional in drivers. The intended use for them
149335fac7e3SMaarten Lankhorst * is to map frequently used objects linearly into kernel address space.
1494de9114ecSDaniel Vetter *
1495de9114ecSDaniel Vetter * To ensure coherency users must call dma_buf_begin_cpu_access() and
1496de9114ecSDaniel Vetter * dma_buf_end_cpu_access() around any cpu access performed through this
1497de9114ecSDaniel Vetter * mapping.
149835fac7e3SMaarten Lankhorst *
14996619ccf1SThomas Zimmermann * Returns 0 on success, or a negative errno code otherwise.
150035fac7e3SMaarten Lankhorst */
15017938f421SLucas De Marchi int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
150235fac7e3SMaarten Lankhorst {
15037938f421SLucas De Marchi struct iosys_map ptr;
150428743e25SDmitry Osipenko int ret;
15056619ccf1SThomas Zimmermann
15067938f421SLucas De Marchi iosys_map_clear(map);
150735fac7e3SMaarten Lankhorst
150835fac7e3SMaarten Lankhorst if (WARN_ON(!dmabuf))
15096619ccf1SThomas Zimmermann return -EINVAL;
151035fac7e3SMaarten Lankhorst
151134c7797fSDmitry Osipenko dma_resv_assert_held(dmabuf->resv);
151234c7797fSDmitry Osipenko
151335fac7e3SMaarten Lankhorst if (!dmabuf->ops->vmap)
15146619ccf1SThomas Zimmermann return -EINVAL;
151535fac7e3SMaarten Lankhorst
151635fac7e3SMaarten Lankhorst if (dmabuf->vmapping_counter) {
151735fac7e3SMaarten Lankhorst dmabuf->vmapping_counter++;
15187938f421SLucas De Marchi BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
15196619ccf1SThomas Zimmermann *map = dmabuf->vmap_ptr;
152028743e25SDmitry Osipenko return 0;
152135fac7e3SMaarten Lankhorst }
152235fac7e3SMaarten Lankhorst
15237938f421SLucas De Marchi BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
152435fac7e3SMaarten Lankhorst
15256619ccf1SThomas Zimmermann ret = dmabuf->ops->vmap(dmabuf, &ptr);
15266619ccf1SThomas Zimmermann if (WARN_ON_ONCE(ret))
152728743e25SDmitry Osipenko return ret;
152835fac7e3SMaarten Lankhorst
15296619ccf1SThomas Zimmermann dmabuf->vmap_ptr = ptr;
153035fac7e3SMaarten Lankhorst dmabuf->vmapping_counter = 1;
153135fac7e3SMaarten Lankhorst
15326619ccf1SThomas Zimmermann *map = dmabuf->vmap_ptr;
15336619ccf1SThomas Zimmermann
153428743e25SDmitry Osipenko return 0;
153535fac7e3SMaarten Lankhorst }
153616b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
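
/*
 * A hedged sketch of using the locked variant from a context that already
 * holds the reservation lock, e.g. inside an exporter callback:
 *
 *	struct iosys_map map;
 *
 *	dma_resv_assert_held(dmabuf->resv);
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (!ret) {
 *		... access through map.vaddr or the iosys_map helpers ...
 *		dma_buf_vunmap(dmabuf, &map);
 *	}
 */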
153735fac7e3SMaarten Lankhorst
153835fac7e3SMaarten Lankhorst /**
153956e5abbaSDmitry Osipenko * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
154056e5abbaSDmitry Osipenko * address space. Same restrictions as for vmap and friends apply.
154156e5abbaSDmitry Osipenko * @dmabuf: [in] buffer to vmap
154256e5abbaSDmitry Osipenko * @map: [out] returns the vmap pointer
154356e5abbaSDmitry Osipenko *
154456e5abbaSDmitry Osipenko * Unlocked version of dma_buf_vmap()
154556e5abbaSDmitry Osipenko *
154656e5abbaSDmitry Osipenko * Returns 0 on success, or a negative errno code otherwise.
154756e5abbaSDmitry Osipenko */
154856e5abbaSDmitry Osipenko int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
154956e5abbaSDmitry Osipenko {
155056e5abbaSDmitry Osipenko int ret;
155156e5abbaSDmitry Osipenko
155256e5abbaSDmitry Osipenko iosys_map_clear(map);
155356e5abbaSDmitry Osipenko
155456e5abbaSDmitry Osipenko if (WARN_ON(!dmabuf))
155556e5abbaSDmitry Osipenko return -EINVAL;
155656e5abbaSDmitry Osipenko
155756e5abbaSDmitry Osipenko dma_resv_lock(dmabuf->resv, NULL);
155856e5abbaSDmitry Osipenko ret = dma_buf_vmap(dmabuf, map);
155956e5abbaSDmitry Osipenko dma_resv_unlock(dmabuf->resv);
156056e5abbaSDmitry Osipenko
156156e5abbaSDmitry Osipenko return ret;
156256e5abbaSDmitry Osipenko }
156356e5abbaSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
156456e5abbaSDmitry Osipenko
156556e5abbaSDmitry Osipenko /**
156635fac7e3SMaarten Lankhorst * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
156735fac7e3SMaarten Lankhorst * @dmabuf: [in] buffer to vunmap
156820e76f1aSThomas Zimmermann * @map: [in] vmap pointer to vunmap
156935fac7e3SMaarten Lankhorst */
15707938f421SLucas De Marchi void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
157135fac7e3SMaarten Lankhorst {
157235fac7e3SMaarten Lankhorst if (WARN_ON(!dmabuf))
157335fac7e3SMaarten Lankhorst return;
157435fac7e3SMaarten Lankhorst
157534c7797fSDmitry Osipenko dma_resv_assert_held(dmabuf->resv);
157634c7797fSDmitry Osipenko
15777938f421SLucas De Marchi BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
157835fac7e3SMaarten Lankhorst BUG_ON(dmabuf->vmapping_counter == 0);
15797938f421SLucas De Marchi BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
158035fac7e3SMaarten Lankhorst
158135fac7e3SMaarten Lankhorst if (--dmabuf->vmapping_counter == 0) {
158235fac7e3SMaarten Lankhorst if (dmabuf->ops->vunmap)
158320e76f1aSThomas Zimmermann dmabuf->ops->vunmap(dmabuf, map);
15847938f421SLucas De Marchi iosys_map_clear(&dmabuf->vmap_ptr);
158535fac7e3SMaarten Lankhorst }
158635fac7e3SMaarten Lankhorst }
158716b0314aSGreg Kroah-Hartman EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
158835fac7e3SMaarten Lankhorst
158956e5abbaSDmitry Osipenko /**
159056e5abbaSDmitry Osipenko * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap_unlocked().
159156e5abbaSDmitry Osipenko * @dmabuf: [in] buffer to vunmap
159256e5abbaSDmitry Osipenko * @map: [in] vmap pointer to vunmap
159356e5abbaSDmitry Osipenko */
159456e5abbaSDmitry Osipenko void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
159556e5abbaSDmitry Osipenko {
159656e5abbaSDmitry Osipenko if (WARN_ON(!dmabuf))
159756e5abbaSDmitry Osipenko return;
159856e5abbaSDmitry Osipenko
159956e5abbaSDmitry Osipenko dma_resv_lock(dmabuf->resv, NULL);
160056e5abbaSDmitry Osipenko dma_buf_vunmap(dmabuf, map);
160156e5abbaSDmitry Osipenko dma_resv_unlock(dmabuf->resv);
160256e5abbaSDmitry Osipenko }
160356e5abbaSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
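
/*
 * A hedged sketch of a CPU write through a temporary kernel mapping, pairing
 * the _unlocked vmap helpers with the CPU access bracket; src and len are
 * assumed to be provided by the caller.
 *
 *	struct iosys_map map;
 *
 *	ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *	dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *	iosys_map_memcpy_to(&map, 0, src, len);
 *	dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 */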
160456e5abbaSDmitry Osipenko
160535fac7e3SMaarten Lankhorst #ifdef CONFIG_DEBUG_FS
1606eb0b947eSMathias Krause static int dma_buf_debug_show(struct seq_file *s, void *unused)
160735fac7e3SMaarten Lankhorst {
160835fac7e3SMaarten Lankhorst struct dma_buf *buf_obj;
160935fac7e3SMaarten Lankhorst struct dma_buf_attachment *attach_obj;
161063639d01SChristian König int count = 0, attach_count;
161135fac7e3SMaarten Lankhorst size_t size = 0;
1612680753ddSChristian König int ret;
161335fac7e3SMaarten Lankhorst
161435fac7e3SMaarten Lankhorst ret = mutex_lock_interruptible(&db_list.lock);
161535fac7e3SMaarten Lankhorst
161635fac7e3SMaarten Lankhorst if (ret)
161735fac7e3SMaarten Lankhorst return ret;
161835fac7e3SMaarten Lankhorst
161935fac7e3SMaarten Lankhorst seq_puts(s, "\nDma-buf Objects:\n");
16206c01aa13SYuanzheng Song seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1621ed63bb1dSGreg Hackmann "size", "flags", "mode", "count", "ino");
162235fac7e3SMaarten Lankhorst
162335fac7e3SMaarten Lankhorst list_for_each_entry(buf_obj, &db_list.head, list_node) {
162435fac7e3SMaarten Lankhorst
162515fd552dSChristian König ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
162615fd552dSChristian König if (ret)
1627f45f57ccSChristian König goto error_unlock;
162835fac7e3SMaarten Lankhorst
16298c0fd126SGuangming Cao
16308c0fd126SGuangming Cao spin_lock(&buf_obj->name_lock);
1631bb2bb903SGreg Hackmann seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
163235fac7e3SMaarten Lankhorst buf_obj->size,
163335fac7e3SMaarten Lankhorst buf_obj->file->f_flags, buf_obj->file->f_mode,
1634a1f6dbacSAl Viro file_count(buf_obj->file),
1635ed63bb1dSGreg Hackmann buf_obj->exp_name,
1636bb2bb903SGreg Hackmann file_inode(buf_obj->file)->i_ino,
16376c01aa13SYuanzheng Song buf_obj->name ?: "<none>");
16388c0fd126SGuangming Cao spin_unlock(&buf_obj->name_lock);
163935fac7e3SMaarten Lankhorst
1640a25efb38SChristian König dma_resv_describe(buf_obj->resv, s);
16415eb2c72cSRussell King
164235fac7e3SMaarten Lankhorst seq_puts(s, "\tAttached Devices:\n");
164335fac7e3SMaarten Lankhorst attach_count = 0;
164435fac7e3SMaarten Lankhorst
164535fac7e3SMaarten Lankhorst list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
16469eddb41dSMarkus Elfring seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
164735fac7e3SMaarten Lankhorst attach_count++;
164835fac7e3SMaarten Lankhorst }
164915fd552dSChristian König dma_resv_unlock(buf_obj->resv);
165035fac7e3SMaarten Lankhorst
165135fac7e3SMaarten Lankhorst seq_printf(s, "Total %d devices attached\n\n",
165235fac7e3SMaarten Lankhorst attach_count);
165335fac7e3SMaarten Lankhorst
165435fac7e3SMaarten Lankhorst count++;
165535fac7e3SMaarten Lankhorst size += buf_obj->size;
165635fac7e3SMaarten Lankhorst }
165735fac7e3SMaarten Lankhorst
165835fac7e3SMaarten Lankhorst seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
165935fac7e3SMaarten Lankhorst
166035fac7e3SMaarten Lankhorst mutex_unlock(&db_list.lock);
166135fac7e3SMaarten Lankhorst return 0;
166215fd552dSChristian König
1663f45f57ccSChristian König error_unlock:
166415fd552dSChristian König mutex_unlock(&db_list.lock);
166515fd552dSChristian König return ret;
166635fac7e3SMaarten Lankhorst }
166735fac7e3SMaarten Lankhorst
16682674305aSYangtao Li DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
166935fac7e3SMaarten Lankhorst
167035fac7e3SMaarten Lankhorst static struct dentry *dma_buf_debugfs_dir;
167135fac7e3SMaarten Lankhorst
167235fac7e3SMaarten Lankhorst static int dma_buf_init_debugfs(void)
167335fac7e3SMaarten Lankhorst {
1674bd3e2208SMathias Krause struct dentry *d;
167535fac7e3SMaarten Lankhorst int err = 0;
16765136629dSJagan Teki
1677bd3e2208SMathias Krause d = debugfs_create_dir("dma_buf", NULL);
1678bd3e2208SMathias Krause if (IS_ERR(d))
1679bd3e2208SMathias Krause return PTR_ERR(d);
16805136629dSJagan Teki
1681bd3e2208SMathias Krause dma_buf_debugfs_dir = d;
168235fac7e3SMaarten Lankhorst
1683bd3e2208SMathias Krause d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1684bd3e2208SMathias Krause NULL, &dma_buf_debug_fops);
1685bd3e2208SMathias Krause if (IS_ERR(d)) {
168635fac7e3SMaarten Lankhorst pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1687b7479990SMathias Krause debugfs_remove_recursive(dma_buf_debugfs_dir);
1688b7479990SMathias Krause dma_buf_debugfs_dir = NULL;
1689bd3e2208SMathias Krause err = PTR_ERR(d);
1690b7479990SMathias Krause }
169135fac7e3SMaarten Lankhorst
169235fac7e3SMaarten Lankhorst return err;
169335fac7e3SMaarten Lankhorst }
169435fac7e3SMaarten Lankhorst
169535fac7e3SMaarten Lankhorst static void dma_buf_uninit_debugfs(void)
169635fac7e3SMaarten Lankhorst {
169735fac7e3SMaarten Lankhorst debugfs_remove_recursive(dma_buf_debugfs_dir);
169835fac7e3SMaarten Lankhorst }
169935fac7e3SMaarten Lankhorst #else
170035fac7e3SMaarten Lankhorst static inline int dma_buf_init_debugfs(void)
170135fac7e3SMaarten Lankhorst {
170235fac7e3SMaarten Lankhorst return 0;
170335fac7e3SMaarten Lankhorst }
170435fac7e3SMaarten Lankhorst static inline void dma_buf_uninit_debugfs(void)
170535fac7e3SMaarten Lankhorst {
170635fac7e3SMaarten Lankhorst }
170735fac7e3SMaarten Lankhorst #endif
170835fac7e3SMaarten Lankhorst
170935fac7e3SMaarten Lankhorst static int __init dma_buf_init(void)
171035fac7e3SMaarten Lankhorst {
1711bdb8d06dSHridya Valsaraju int ret;
1712bdb8d06dSHridya Valsaraju
1713bdb8d06dSHridya Valsaraju ret = dma_buf_init_sysfs_statistics();
1714bdb8d06dSHridya Valsaraju if (ret)
1715bdb8d06dSHridya Valsaraju return ret;
1716bdb8d06dSHridya Valsaraju
1717ed63bb1dSGreg Hackmann dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1718ed63bb1dSGreg Hackmann if (IS_ERR(dma_buf_mnt))
1719ed63bb1dSGreg Hackmann return PTR_ERR(dma_buf_mnt);
1720ed63bb1dSGreg Hackmann
172135fac7e3SMaarten Lankhorst mutex_init(&db_list.lock);
172235fac7e3SMaarten Lankhorst INIT_LIST_HEAD(&db_list.head);
172335fac7e3SMaarten Lankhorst dma_buf_init_debugfs();
172435fac7e3SMaarten Lankhorst return 0;
172535fac7e3SMaarten Lankhorst }
172635fac7e3SMaarten Lankhorst subsys_initcall(dma_buf_init);
172735fac7e3SMaarten Lankhorst
172835fac7e3SMaarten Lankhorst static void __exit dma_buf_deinit(void)
172935fac7e3SMaarten Lankhorst {
173035fac7e3SMaarten Lankhorst dma_buf_uninit_debugfs();
1731ed63bb1dSGreg Hackmann kern_unmount(dma_buf_mnt);
1732bdb8d06dSHridya Valsaraju dma_buf_uninit_sysfs_statistics();
173335fac7e3SMaarten Lankhorst }
173435fac7e3SMaarten Lankhorst __exitcall(dma_buf_deinit);
1735