xref: /openbmc/linux/drivers/dma-buf/dma-buf.c (revision ae6f2db4d59e9f8c90cb3c2d2a954832898d0f2b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to the linaro-mm-sig list, and especially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/anon_inodes.h>
19 #include <linux/export.h>
20 #include <linux/debugfs.h>
21 #include <linux/module.h>
22 #include <linux/seq_file.h>
23 #include <linux/sync_file.h>
24 #include <linux/poll.h>
25 #include <linux/dma-resv.h>
26 #include <linux/mm.h>
27 #include <linux/mount.h>
28 #include <linux/pseudo_fs.h>
29 
30 #include <uapi/linux/dma-buf.h>
31 #include <uapi/linux/magic.h>
32 
33 #include "dma-buf-sysfs-stats.h"
34 
35 static inline int is_dma_buf_file(struct file *);
36 
37 struct dma_buf_list {
38 	struct list_head head;
39 	struct mutex lock;
40 };
41 
42 static struct dma_buf_list db_list;
43 
44 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
45 {
46 	struct dma_buf *dmabuf;
47 	char name[DMA_BUF_NAME_LEN];
48 	size_t ret = 0;
49 
50 	dmabuf = dentry->d_fsdata;
51 	spin_lock(&dmabuf->name_lock);
52 	if (dmabuf->name)
53 		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
54 	spin_unlock(&dmabuf->name_lock);
55 
56 	return dynamic_dname(buffer, buflen, "/%s:%s",
57 			     dentry->d_name.name, ret > 0 ? name : "");
58 }
59 
60 static void dma_buf_release(struct dentry *dentry)
61 {
62 	struct dma_buf *dmabuf;
63 
64 	dmabuf = dentry->d_fsdata;
65 	if (unlikely(!dmabuf))
66 		return;
67 
68 	BUG_ON(dmabuf->vmapping_counter);
69 
70 	/*
71 	 * If you hit this BUG() it could mean:
72 	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
73 	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
74 	 */
75 	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
76 
77 	dma_buf_stats_teardown(dmabuf);
78 	dmabuf->ops->release(dmabuf);
79 
80 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
81 		dma_resv_fini(dmabuf->resv);
82 
83 	WARN_ON(!list_empty(&dmabuf->attachments));
84 	module_put(dmabuf->owner);
85 	kfree(dmabuf->name);
86 	kfree(dmabuf);
87 }
88 
89 static int dma_buf_file_release(struct inode *inode, struct file *file)
90 {
91 	struct dma_buf *dmabuf;
92 
93 	if (!is_dma_buf_file(file))
94 		return -EINVAL;
95 
96 	dmabuf = file->private_data;
97 
98 	mutex_lock(&db_list.lock);
99 	list_del(&dmabuf->list_node);
100 	mutex_unlock(&db_list.lock);
101 
102 	return 0;
103 }
104 
105 static const struct dentry_operations dma_buf_dentry_ops = {
106 	.d_dname = dmabuffs_dname,
107 	.d_release = dma_buf_release,
108 };
109 
110 static struct vfsmount *dma_buf_mnt;
111 
112 static int dma_buf_fs_init_context(struct fs_context *fc)
113 {
114 	struct pseudo_fs_context *ctx;
115 
116 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
117 	if (!ctx)
118 		return -ENOMEM;
119 	ctx->dops = &dma_buf_dentry_ops;
120 	return 0;
121 }
122 
123 static struct file_system_type dma_buf_fs_type = {
124 	.name = "dmabuf",
125 	.init_fs_context = dma_buf_fs_init_context,
126 	.kill_sb = kill_anon_super,
127 };
128 
129 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
130 {
131 	struct dma_buf *dmabuf;
132 	int ret;
133 
134 	if (!is_dma_buf_file(file))
135 		return -EINVAL;
136 
137 	dmabuf = file->private_data;
138 
139 	/* check if buffer supports mmap */
140 	if (!dmabuf->ops->mmap)
141 		return -EINVAL;
142 
143 	/* check for overflowing the buffer's size */
144 	if (vma->vm_pgoff + vma_pages(vma) >
145 	    dmabuf->size >> PAGE_SHIFT)
146 		return -EINVAL;
147 
148 	dma_resv_lock(dmabuf->resv, NULL);
149 	ret = dmabuf->ops->mmap(dmabuf, vma);
150 	dma_resv_unlock(dmabuf->resv);
151 
152 	return ret;
153 }
154 
155 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
156 {
157 	struct dma_buf *dmabuf;
158 	loff_t base;
159 
160 	if (!is_dma_buf_file(file))
161 		return -EBADF;
162 
163 	dmabuf = file->private_data;
164 
165 	/* only support discovering the end of the buffer,
166 	   but also allow SEEK_SET to maintain the idiomatic
167 	   SEEK_END(0), SEEK_CUR(0) pattern */
168 	if (whence == SEEK_END)
169 		base = dmabuf->size;
170 	else if (whence == SEEK_SET)
171 		base = 0;
172 	else
173 		return -EINVAL;
174 
175 	if (offset != 0)
176 		return -EINVAL;
177 
178 	return base + offset;
179 }
180 
181 /**
182  * DOC: implicit fence polling
183  *
184  * To support cross-device and cross-driver synchronization of buffer access
185  * implicit fences (represented internally in the kernel with &struct dma_fence)
186  * can be attached to a &dma_buf. The glue for that and a few related things are
187  * provided in the &dma_resv structure.
188  *
189  * Userspace can query the state of these implicitly tracked fences using poll()
190  * and related system calls:
191  *
192  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
193  *   most recent write or exclusive fence.
194  *
195  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
196  *   all attached fences, shared and exclusive ones.
197  *
198  * Note that this only signals the completion of the respective fences, i.e. the
199  * DMA transfers are complete. Cache flushing and any other necessary
200  * preparations before CPU access can begin still need to happen.
201  *
202  * As an alternative to poll(), the set of fences on a DMA buffer can be
203  * exported as a &sync_file using &dma_buf_sync_file_export.
204  */
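
/*
 * Illustrative userspace sketch (not part of this file; "buf_fd" is assumed
 * to be a dma-buf file descriptor obtained elsewhere, e.g. from a driver's
 * export ioctl): waiting for all attached fences with poll().
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = {
 *		.fd = buf_fd,
 *		.events = POLLOUT,	// all fences, shared and exclusive
 *	};
 *
 *	poll(&pfd, 1, -1);		// blocks until every fence has signalled
 *
 * Using .events = POLLIN instead would only wait for the most recent write or
 * exclusive fence.
 */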
205 
206 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
207 {
208 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
209 	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
210 	unsigned long flags;
211 
212 	spin_lock_irqsave(&dcb->poll->lock, flags);
213 	wake_up_locked_poll(dcb->poll, dcb->active);
214 	dcb->active = 0;
215 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
216 	dma_fence_put(fence);
217 	/* Paired with get_file in dma_buf_poll */
218 	fput(dmabuf->file);
219 }
220 
221 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
222 				struct dma_buf_poll_cb_t *dcb)
223 {
224 	struct dma_resv_iter cursor;
225 	struct dma_fence *fence;
226 	int r;
227 
228 	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
229 				fence) {
230 		dma_fence_get(fence);
231 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
232 		if (!r)
233 			return true;
234 		dma_fence_put(fence);
235 	}
236 
237 	return false;
238 }
239 
240 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
241 {
242 	struct dma_buf *dmabuf;
243 	struct dma_resv *resv;
244 	__poll_t events;
245 
246 	dmabuf = file->private_data;
247 	if (!dmabuf || !dmabuf->resv)
248 		return EPOLLERR;
249 
250 	resv = dmabuf->resv;
251 
252 	poll_wait(file, &dmabuf->poll, poll);
253 
254 	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
255 	if (!events)
256 		return 0;
257 
258 	dma_resv_lock(resv, NULL);
259 
260 	if (events & EPOLLOUT) {
261 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
262 
263 		/* Check that callback isn't busy */
264 		spin_lock_irq(&dmabuf->poll.lock);
265 		if (dcb->active)
266 			events &= ~EPOLLOUT;
267 		else
268 			dcb->active = EPOLLOUT;
269 		spin_unlock_irq(&dmabuf->poll.lock);
270 
271 		if (events & EPOLLOUT) {
272 			/* Paired with fput in dma_buf_poll_cb */
273 			get_file(dmabuf->file);
274 
275 			if (!dma_buf_poll_add_cb(resv, true, dcb))
276 				/* No callback queued, wake up any other waiters */
277 				dma_buf_poll_cb(NULL, &dcb->cb);
278 			else
279 				events &= ~EPOLLOUT;
280 		}
281 	}
282 
283 	if (events & EPOLLIN) {
284 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
285 
286 		/* Check that callback isn't busy */
287 		spin_lock_irq(&dmabuf->poll.lock);
288 		if (dcb->active)
289 			events &= ~EPOLLIN;
290 		else
291 			dcb->active = EPOLLIN;
292 		spin_unlock_irq(&dmabuf->poll.lock);
293 
294 		if (events & EPOLLIN) {
295 			/* Paired with fput in dma_buf_poll_cb */
296 			get_file(dmabuf->file);
297 
298 			if (!dma_buf_poll_add_cb(resv, false, dcb))
299 				/* No callback queued, wake up any other waiters */
300 				dma_buf_poll_cb(NULL, &dcb->cb);
301 			else
302 				events &= ~EPOLLIN;
303 		}
304 	}
305 
306 	dma_resv_unlock(resv);
307 	return events;
308 }
309 
310 /**
311  * dma_buf_set_name - Set a name on a dma_buf to track its usage.
312  * The name of a dma-buf may be changed if the same piece of
313  * memory is used for multiple purposes between different devices.
314  *
315  * @dmabuf: [in]     dmabuf buffer that will be renamed.
316  * @buf:    [in]     A piece of userspace memory that contains the name of
317  *                   the dma-buf.
318  *
319  * Returns 0 on success, or a negative error code if copying the name
320  * from userspace fails.
321  *
322  */
323 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
324 {
325 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
326 
327 	if (IS_ERR(name))
328 		return PTR_ERR(name);
329 
330 	spin_lock(&dmabuf->name_lock);
331 	kfree(dmabuf->name);
332 	dmabuf->name = name;
333 	spin_unlock(&dmabuf->name_lock);
334 
335 	return 0;
336 }
337 
338 #if IS_ENABLED(CONFIG_SYNC_FILE)
339 static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
340 				     void __user *user_data)
341 {
342 	struct dma_buf_export_sync_file arg;
343 	enum dma_resv_usage usage;
344 	struct dma_fence *fence = NULL;
345 	struct sync_file *sync_file;
346 	int fd, ret;
347 
348 	if (copy_from_user(&arg, user_data, sizeof(arg)))
349 		return -EFAULT;
350 
351 	if (arg.flags & ~DMA_BUF_SYNC_RW)
352 		return -EINVAL;
353 
354 	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
355 		return -EINVAL;
356 
357 	fd = get_unused_fd_flags(O_CLOEXEC);
358 	if (fd < 0)
359 		return fd;
360 
361 	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
362 	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
363 	if (ret)
364 		goto err_put_fd;
365 
366 	if (!fence)
367 		fence = dma_fence_get_stub();
368 
369 	sync_file = sync_file_create(fence);
370 
371 	dma_fence_put(fence);
372 
373 	if (!sync_file) {
374 		ret = -ENOMEM;
375 		goto err_put_fd;
376 	}
377 
378 	arg.fd = fd;
379 	if (copy_to_user(user_data, &arg, sizeof(arg))) {
380 		ret = -EFAULT;
381 		goto err_put_file;
382 	}
383 
384 	fd_install(fd, sync_file->file);
385 
386 	return 0;
387 
388 err_put_file:
389 	fput(sync_file->file);
390 err_put_fd:
391 	put_unused_fd(fd);
392 	return ret;
393 }
394 
395 static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
396 				     const void __user *user_data)
397 {
398 	struct dma_buf_import_sync_file arg;
399 	struct dma_fence *fence;
400 	enum dma_resv_usage usage;
401 	int ret = 0;
402 
403 	if (copy_from_user(&arg, user_data, sizeof(arg)))
404 		return -EFAULT;
405 
406 	if (arg.flags & ~DMA_BUF_SYNC_RW)
407 		return -EINVAL;
408 
409 	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
410 		return -EINVAL;
411 
412 	fence = sync_file_get_fence(arg.fd);
413 	if (!fence)
414 		return -EINVAL;
415 
416 	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
417 						   DMA_RESV_USAGE_READ;
418 
419 	dma_resv_lock(dmabuf->resv, NULL);
420 
421 	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
422 	if (!ret)
423 		dma_resv_add_fence(dmabuf->resv, fence, usage);
424 
425 	dma_resv_unlock(dmabuf->resv);
426 
427 	dma_fence_put(fence);
428 
429 	return ret;
430 }
431 #endif
432 
433 static long dma_buf_ioctl(struct file *file,
434 			  unsigned int cmd, unsigned long arg)
435 {
436 	struct dma_buf *dmabuf;
437 	struct dma_buf_sync sync;
438 	enum dma_data_direction direction;
439 	int ret;
440 
441 	dmabuf = file->private_data;
442 
443 	switch (cmd) {
444 	case DMA_BUF_IOCTL_SYNC:
445 		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
446 			return -EFAULT;
447 
448 		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
449 			return -EINVAL;
450 
451 		switch (sync.flags & DMA_BUF_SYNC_RW) {
452 		case DMA_BUF_SYNC_READ:
453 			direction = DMA_FROM_DEVICE;
454 			break;
455 		case DMA_BUF_SYNC_WRITE:
456 			direction = DMA_TO_DEVICE;
457 			break;
458 		case DMA_BUF_SYNC_RW:
459 			direction = DMA_BIDIRECTIONAL;
460 			break;
461 		default:
462 			return -EINVAL;
463 		}
464 
465 		if (sync.flags & DMA_BUF_SYNC_END)
466 			ret = dma_buf_end_cpu_access(dmabuf, direction);
467 		else
468 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
469 
470 		return ret;
471 
472 	case DMA_BUF_SET_NAME_A:
473 	case DMA_BUF_SET_NAME_B:
474 		return dma_buf_set_name(dmabuf, (const char __user *)arg);
475 
476 #if IS_ENABLED(CONFIG_SYNC_FILE)
477 	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
478 		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
479 	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
480 		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
481 #endif
482 
483 	default:
484 		return -ENOTTY;
485 	}
486 }
487 
488 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
489 {
490 	struct dma_buf *dmabuf = file->private_data;
491 
492 	seq_printf(m, "size:\t%zu\n", dmabuf->size);
493 	/* Don't count the temporary reference taken inside procfs seq_show */
494 	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
495 	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
496 	spin_lock(&dmabuf->name_lock);
497 	if (dmabuf->name)
498 		seq_printf(m, "name:\t%s\n", dmabuf->name);
499 	spin_unlock(&dmabuf->name_lock);
500 }
501 
502 static const struct file_operations dma_buf_fops = {
503 	.release	= dma_buf_file_release,
504 	.mmap		= dma_buf_mmap_internal,
505 	.llseek		= dma_buf_llseek,
506 	.poll		= dma_buf_poll,
507 	.unlocked_ioctl	= dma_buf_ioctl,
508 	.compat_ioctl	= compat_ptr_ioctl,
509 	.show_fdinfo	= dma_buf_show_fdinfo,
510 };
511 
512 /*
513  * is_dma_buf_file - Check if struct file* is associated with dma_buf
514  */
515 static inline int is_dma_buf_file(struct file *file)
516 {
517 	return file->f_op == &dma_buf_fops;
518 }
519 
520 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
521 {
522 	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
523 	struct file *file;
524 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
525 
526 	if (IS_ERR(inode))
527 		return ERR_CAST(inode);
528 
529 	inode->i_size = dmabuf->size;
530 	inode_set_bytes(inode, dmabuf->size);
531 
532 	/*
533  * The ->i_ino acquired from get_next_ino() is not unique and thus
534  * not suitable for use as the dentry name by dmabuf stats.
535  * Override ->i_ino with a unique, dmabuffs-specific
536  * value.
537 	 */
538 	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
539 	flags &= O_ACCMODE | O_NONBLOCK;
540 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
541 				 flags, &dma_buf_fops);
542 	if (IS_ERR(file))
543 		goto err_alloc_file;
544 	file->private_data = dmabuf;
545 	file->f_path.dentry->d_fsdata = dmabuf;
546 
547 	return file;
548 
549 err_alloc_file:
550 	iput(inode);
551 	return file;
552 }
553 
554 /**
555  * DOC: dma buf device access
556  *
557  * For device DMA access to a shared DMA buffer the usual sequence of operations
558  * is fairly simple:
559  *
560  * 1. The exporter defines its exporter instance using
561  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
562  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
563  *    as a file descriptor by calling dma_buf_fd().
564  *
565  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
566  *    to share with: First the file descriptor is converted to a &dma_buf using
567  *    dma_buf_get(). Then the buffer is attached to the device using
568  *    dma_buf_attach().
569  *
570  *    Up to this stage the exporter is still free to migrate or reallocate the
571  *    backing storage.
572  *
573  * 3. Once the buffer is attached to all devices userspace can initiate DMA
574  *    access to the shared buffer. In the kernel this is done by calling
575  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
576  *
577  * 4. Once a driver is done with a shared buffer it needs to call
578  *    dma_buf_detach() (after cleaning up any mappings) and then release the
579  *    reference acquired with dma_buf_get() by calling dma_buf_put().
580  *
581  * For the detailed semantics exporters are expected to implement see
582  * &dma_buf_ops.
583  */
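
/*
 * Illustrative importer-side sketch of the sequence above (a simplified
 * example, not a definitive implementation; "fd" is a dma-buf file descriptor
 * handed in by userspace, "dev" is the importing struct device, and error
 * handling is trimmed):
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the DMA addresses in sgt ...
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */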
584 
585 /**
586  * dma_buf_export - Creates a new dma_buf, and associates an anon file
587  * with this buffer, so it can be exported.
588  * Also connects the allocator-specific data and ops to the buffer.
589  * Additionally, provide a name string for exporter; useful in debugging.
590  *
591  * @exp_info:	[in]	holds all the export related information provided
592  *			by the exporter. see &struct dma_buf_export_info
593  *			for further details.
594  *
595  * Returns, on success, a newly created struct dma_buf object, which wraps the
596  * supplied private data and operations for struct dma_buf_ops. If the
597  * required ops are missing, or if allocating the struct dma_buf fails, an
598  * ERR_PTR() encoding a negative error is returned instead.
599  *
600  * For most cases the easiest way to create @exp_info is through the
601  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
602  */
603 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
604 {
605 	struct dma_buf *dmabuf;
606 	struct dma_resv *resv = exp_info->resv;
607 	struct file *file;
608 	size_t alloc_size = sizeof(struct dma_buf);
609 	int ret;
610 
611 	if (!exp_info->resv)
612 		alloc_size += sizeof(struct dma_resv);
613 	else
614 		/* prevent &dma_buf[1] == dma_buf->resv */
615 		alloc_size += 1;
616 
617 	if (WARN_ON(!exp_info->priv
618 			  || !exp_info->ops
619 			  || !exp_info->ops->map_dma_buf
620 			  || !exp_info->ops->unmap_dma_buf
621 			  || !exp_info->ops->release)) {
622 		return ERR_PTR(-EINVAL);
623 	}
624 
625 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
626 		    (exp_info->ops->pin || exp_info->ops->unpin)))
627 		return ERR_PTR(-EINVAL);
628 
629 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
630 		return ERR_PTR(-EINVAL);
631 
632 	if (!try_module_get(exp_info->owner))
633 		return ERR_PTR(-ENOENT);
634 
635 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
636 	if (!dmabuf) {
637 		ret = -ENOMEM;
638 		goto err_module;
639 	}
640 
641 	dmabuf->priv = exp_info->priv;
642 	dmabuf->ops = exp_info->ops;
643 	dmabuf->size = exp_info->size;
644 	dmabuf->exp_name = exp_info->exp_name;
645 	dmabuf->owner = exp_info->owner;
646 	spin_lock_init(&dmabuf->name_lock);
647 	init_waitqueue_head(&dmabuf->poll);
648 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
649 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
650 
651 	if (!resv) {
652 		resv = (struct dma_resv *)&dmabuf[1];
653 		dma_resv_init(resv);
654 	}
655 	dmabuf->resv = resv;
656 
657 	file = dma_buf_getfile(dmabuf, exp_info->flags);
658 	if (IS_ERR(file)) {
659 		ret = PTR_ERR(file);
660 		goto err_dmabuf;
661 	}
662 
663 	dmabuf->file = file;
664 
665 	INIT_LIST_HEAD(&dmabuf->attachments);
666 
667 	mutex_lock(&db_list.lock);
668 	list_add(&dmabuf->list_node, &db_list.head);
669 	mutex_unlock(&db_list.lock);
670 
671 	ret = dma_buf_stats_setup(dmabuf);
672 	if (ret)
673 		goto err_sysfs;
674 
675 	return dmabuf;
676 
677 err_sysfs:
678 	/*
679 	 * Set file->f_path.dentry->d_fsdata to NULL so that when
680 	 * dma_buf_release() gets invoked by dentry_ops, it exits
681 	 * early before calling the release() dma_buf op.
682 	 */
683 	file->f_path.dentry->d_fsdata = NULL;
684 	fput(file);
685 err_dmabuf:
686 	kfree(dmabuf);
687 err_module:
688 	module_put(exp_info->owner);
689 	return ERR_PTR(ret);
690 }
691 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
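
/*
 * Illustrative exporter-side sketch (a minimal example only; "my_obj" and
 * "my_dmabuf_ops" are hypothetical driver-side names):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_obj->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_obj;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 *	return fd;
 */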
692 
693 /**
694  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
695  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
696  * @flags:      [in]    flags to give to fd
697  *
698  * On success, returns an associated 'fd'. Else, returns error.
699  */
700 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
701 {
702 	int fd;
703 
704 	if (!dmabuf || !dmabuf->file)
705 		return -EINVAL;
706 
707 	fd = get_unused_fd_flags(flags);
708 	if (fd < 0)
709 		return fd;
710 
711 	fd_install(fd, dmabuf->file);
712 
713 	return fd;
714 }
715 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
716 
717 /**
718  * dma_buf_get - returns the struct dma_buf related to an fd
719  * @fd:	[in]	fd associated with the struct dma_buf to be returned
720  *
721  * On success, returns the struct dma_buf associated with an fd; uses
722  * file's refcounting done by fget to increase the refcount. Returns ERR_PTR
723  * otherwise.
724  */
725 struct dma_buf *dma_buf_get(int fd)
726 {
727 	struct file *file;
728 
729 	file = fget(fd);
730 
731 	if (!file)
732 		return ERR_PTR(-EBADF);
733 
734 	if (!is_dma_buf_file(file)) {
735 		fput(file);
736 		return ERR_PTR(-EINVAL);
737 	}
738 
739 	return file->private_data;
740 }
741 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
742 
743 /**
744  * dma_buf_put - decreases refcount of the buffer
745  * @dmabuf:	[in]	buffer to reduce refcount of
746  *
747  * Uses file's refcounting done implicitly by fput().
748  *
749  * If, as a result of this call, the refcount becomes 0, the 'release' file
750  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
751  * in turn, and frees the memory allocated for dmabuf when exported.
752  */
753 void dma_buf_put(struct dma_buf *dmabuf)
754 {
755 	if (WARN_ON(!dmabuf || !dmabuf->file))
756 		return;
757 
758 	fput(dmabuf->file);
759 }
760 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
761 
762 static void mangle_sg_table(struct sg_table *sg_table)
763 {
764 #ifdef CONFIG_DMABUF_DEBUG
765 	int i;
766 	struct scatterlist *sg;
767 
768 	/* To catch abuse of the underlying struct page by importers, mix
769 	 * up the bits, but take care to preserve the low SG_ bits to
770 	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
771 	 * before passing the sgt back to the exporter. */
772 	for_each_sgtable_sg(sg_table, sg, i)
773 		sg->page_link ^= ~0xffUL;
774 #endif
775 
776 }
777 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
778 				       enum dma_data_direction direction)
779 {
780 	struct sg_table *sg_table;
781 	signed long ret;
782 
783 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
784 	if (IS_ERR_OR_NULL(sg_table))
785 		return sg_table;
786 
787 	if (!dma_buf_attachment_is_dynamic(attach)) {
788 		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
789 					    DMA_RESV_USAGE_KERNEL, true,
790 					    MAX_SCHEDULE_TIMEOUT);
791 		if (ret < 0) {
792 			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
793 							   direction);
794 			return ERR_PTR(ret);
795 		}
796 	}
797 
798 	mangle_sg_table(sg_table);
799 	return sg_table;
800 }
801 
802 /**
803  * DOC: locking convention
804  *
805  * In order to avoid deadlock situations between dma-buf exporters and importers,
806  * all dma-buf API users must follow the common dma-buf locking convention.
807  *
808  * Convention for importers
809  *
810  * 1. Importers must hold the dma-buf reservation lock when calling these
811  *    functions:
812  *
813  *     - dma_buf_pin()
814  *     - dma_buf_unpin()
815  *     - dma_buf_map_attachment()
816  *     - dma_buf_unmap_attachment()
817  *     - dma_buf_vmap()
818  *     - dma_buf_vunmap()
819  *
820  * 2. Importers must not hold the dma-buf reservation lock when calling these
821  *    functions:
822  *
823  *     - dma_buf_attach()
824  *     - dma_buf_dynamic_attach()
825  *     - dma_buf_detach()
826  *     - dma_buf_export()
827  *     - dma_buf_fd()
828  *     - dma_buf_get()
829  *     - dma_buf_put()
830  *     - dma_buf_mmap()
831  *     - dma_buf_begin_cpu_access()
832  *     - dma_buf_end_cpu_access()
833  *     - dma_buf_map_attachment_unlocked()
834  *     - dma_buf_unmap_attachment_unlocked()
835  *     - dma_buf_vmap_unlocked()
836  *     - dma_buf_vunmap_unlocked()
837  *
838  * Convention for exporters
839  *
840  * 1. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
841  *    lock unheld, and the exporter may take the lock:
842  *
843  *     - &dma_buf_ops.attach()
844  *     - &dma_buf_ops.detach()
845  *     - &dma_buf_ops.release()
846  *     - &dma_buf_ops.begin_cpu_access()
847  *     - &dma_buf_ops.end_cpu_access()
848  *
849  * 2. These &dma_buf_ops callbacks are invoked with the dma-buf reservation
850  *    lock held, and the exporter must not take the lock:
851  *
852  *     - &dma_buf_ops.pin()
853  *     - &dma_buf_ops.unpin()
854  *     - &dma_buf_ops.map_dma_buf()
855  *     - &dma_buf_ops.unmap_dma_buf()
856  *     - &dma_buf_ops.mmap()
857  *     - &dma_buf_ops.vmap()
858  *     - &dma_buf_ops.vunmap()
859  *
860  * 3. Exporters must hold the dma-buf reservation lock when calling these
861  *    functions:
862  *
863  *     - dma_buf_move_notify()
864  */
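
/*
 * Illustrative sketch of the importer side of this convention ("attach" is
 * assumed to be a valid attachment). Holding the reservation lock around the
 * locked variants:
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... use sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(attach->dmabuf->resv);
 *
 * is equivalent to calling the _unlocked wrappers, which take the lock
 * internally:
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	// ... use sgt ...
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 */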
865 
866 /**
867  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
868  * @dmabuf:		[in]	buffer to attach device to.
869  * @dev:		[in]	device to be attached.
870  * @importer_ops:	[in]	importer operations for the attachment
871  * @importer_priv:	[in]	importer private pointer for the attachment
872  *
873  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
874  * must be cleaned up by calling dma_buf_detach().
875  *
876  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
877  * functionality.
878  *
879  * Returns:
880  *
881  * A pointer to newly created &dma_buf_attachment on success, or a negative
882  * error code wrapped into a pointer on failure.
883  *
884  * Note that this can fail if the backing storage of @dmabuf is in a place not
885  * accessible to @dev, and cannot be moved to a more suitable place. This is
886  * indicated with the error code -EBUSY.
887  */
888 struct dma_buf_attachment *
889 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
890 		       const struct dma_buf_attach_ops *importer_ops,
891 		       void *importer_priv)
892 {
893 	struct dma_buf_attachment *attach;
894 	int ret;
895 
896 	if (WARN_ON(!dmabuf || !dev))
897 		return ERR_PTR(-EINVAL);
898 
899 	if (WARN_ON(importer_ops && !importer_ops->move_notify))
900 		return ERR_PTR(-EINVAL);
901 
902 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
903 	if (!attach)
904 		return ERR_PTR(-ENOMEM);
905 
906 	attach->dev = dev;
907 	attach->dmabuf = dmabuf;
908 	if (importer_ops)
909 		attach->peer2peer = importer_ops->allow_peer2peer;
910 	attach->importer_ops = importer_ops;
911 	attach->importer_priv = importer_priv;
912 
913 	if (dmabuf->ops->attach) {
914 		ret = dmabuf->ops->attach(dmabuf, attach);
915 		if (ret)
916 			goto err_attach;
917 	}
918 	dma_resv_lock(dmabuf->resv, NULL);
919 	list_add(&attach->node, &dmabuf->attachments);
920 	dma_resv_unlock(dmabuf->resv);
921 
922 	/* When either the importer or the exporter can't handle dynamic
923 	 * mappings we cache the mapping here to avoid issues with the
924 	 * reservation object lock.
925 	 */
926 	if (dma_buf_attachment_is_dynamic(attach) !=
927 	    dma_buf_is_dynamic(dmabuf)) {
928 		struct sg_table *sgt;
929 
930 		dma_resv_lock(attach->dmabuf->resv, NULL);
931 		if (dma_buf_is_dynamic(attach->dmabuf)) {
932 			ret = dmabuf->ops->pin(attach);
933 			if (ret)
934 				goto err_unlock;
935 		}
936 
937 		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
938 		if (!sgt)
939 			sgt = ERR_PTR(-ENOMEM);
940 		if (IS_ERR(sgt)) {
941 			ret = PTR_ERR(sgt);
942 			goto err_unpin;
943 		}
944 		dma_resv_unlock(attach->dmabuf->resv);
945 		attach->sgt = sgt;
946 		attach->dir = DMA_BIDIRECTIONAL;
947 	}
948 
949 	return attach;
950 
951 err_attach:
952 	kfree(attach);
953 	return ERR_PTR(ret);
954 
955 err_unpin:
956 	if (dma_buf_is_dynamic(attach->dmabuf))
957 		dmabuf->ops->unpin(attach);
958 
959 err_unlock:
960 	dma_resv_unlock(attach->dmabuf->resv);
961 
962 	dma_buf_detach(dmabuf, attach);
963 	return ERR_PTR(ret);
964 }
965 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
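
/*
 * Illustrative sketch of a dynamic importer (hypothetical names; the
 * move_notify callback must invalidate any cached mappings so the exporter
 * is free to relocate the backing storage):
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// drop cached sg_table / DMA addresses for this attachment
 *	}
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
 *					my_importer_priv);
 */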
966 
967 /**
968  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
969  * @dmabuf:	[in]	buffer to attach device to.
970  * @dev:	[in]	device to be attached.
971  *
972  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
973  * mapping.
974  */
975 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
976 					  struct device *dev)
977 {
978 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
979 }
980 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
981 
982 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
983 			    struct sg_table *sg_table,
984 			    enum dma_data_direction direction)
985 {
986 	/* uses XOR, hence this unmangles */
987 	mangle_sg_table(sg_table);
988 
989 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
990 }
991 
992 /**
993  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
994  * @dmabuf:	[in]	buffer to detach from.
995  * @attach:	[in]	attachment to be detached; is freed after this call.
996  *
997  * Clean up a device attachment obtained by calling dma_buf_attach().
998  *
999  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1000  */
1001 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1002 {
1003 	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1004 		return;
1005 
1006 	dma_resv_lock(dmabuf->resv, NULL);
1007 
1008 	if (attach->sgt) {
1009 
1010 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
1011 
1012 		if (dma_buf_is_dynamic(attach->dmabuf))
1013 			dmabuf->ops->unpin(attach);
1014 	}
1015 	list_del(&attach->node);
1016 
1017 	dma_resv_unlock(dmabuf->resv);
1018 
1019 	if (dmabuf->ops->detach)
1020 		dmabuf->ops->detach(dmabuf, attach);
1021 
1022 	kfree(attach);
1023 }
1024 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1025 
1026 /**
1027  * dma_buf_pin - Lock down the DMA-buf
1028  * @attach:	[in]	attachment which should be pinned
1029  *
1030  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1031  * call this, and only for limited use cases like scanout and not for temporary
1032  * pin operations. It is not permitted to allow userspace to pin arbitrary
1033  * amounts of buffers through this interface.
1034  *
1035  * Buffers must be unpinned by calling dma_buf_unpin().
1036  *
1037  * Returns:
1038  * 0 on success, negative error code on failure.
1039  */
1040 int dma_buf_pin(struct dma_buf_attachment *attach)
1041 {
1042 	struct dma_buf *dmabuf = attach->dmabuf;
1043 	int ret = 0;
1044 
1045 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1046 
1047 	dma_resv_assert_held(dmabuf->resv);
1048 
1049 	if (dmabuf->ops->pin)
1050 		ret = dmabuf->ops->pin(attach);
1051 
1052 	return ret;
1053 }
1054 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1055 
1056 /**
1057  * dma_buf_unpin - Unpin a DMA-buf
1058  * @attach:	[in]	attachment which should be unpinned
1059  *
1060  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1061  * any mapping of @attach again and inform the importer through
1062  * &dma_buf_attach_ops.move_notify.
1063  */
1064 void dma_buf_unpin(struct dma_buf_attachment *attach)
1065 {
1066 	struct dma_buf *dmabuf = attach->dmabuf;
1067 
1068 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1069 
1070 	dma_resv_assert_held(dmabuf->resv);
1071 
1072 	if (dmabuf->ops->unpin)
1073 		dmabuf->ops->unpin(attach);
1074 }
1075 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
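
/*
 * Illustrative sketch (a dynamic importer pinning a buffer for scanout;
 * hypothetical flow with error handling trimmed):
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	if (!ret)
 *		sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	dma_resv_unlock(attach->dmabuf->resv);
 *
 * and later, once scanout has stopped:
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *	dma_buf_unpin(attach);
 *	dma_resv_unlock(attach->dmabuf->resv);
 */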
1076 
1077 /**
1078  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1079  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1080  * dma_buf_ops.
1081  * @attach:	[in]	attachment whose scatterlist is to be returned
1082  * @direction:	[in]	direction of DMA transfer
1083  *
1084  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1085  * on error. May return -EINTR if it is interrupted by a signal.
1086  *
1087  * On success, the DMA addresses and lengths in the returned scatterlist are
1088  * PAGE_SIZE aligned.
1089  *
1090  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1091  * the underlying backing storage is pinned for as long as a mapping exists,
1092  * therefore users/importers should not hold onto a mapping for undue amounts of
1093  * time.
1094  *
1095  * Important: Dynamic importers must wait for the exclusive fence of the struct
1096  * dma_resv attached to the DMA-BUF first.
1097  */
1098 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1099 					enum dma_data_direction direction)
1100 {
1101 	struct sg_table *sg_table;
1102 	int r;
1103 
1104 	might_sleep();
1105 
1106 	if (WARN_ON(!attach || !attach->dmabuf))
1107 		return ERR_PTR(-EINVAL);
1108 
1109 	dma_resv_assert_held(attach->dmabuf->resv);
1110 
1111 	if (attach->sgt) {
1112 		/*
1113 		 * Two mappings with different directions for the same
1114 		 * attachment are not allowed.
1115 		 */
1116 		if (attach->dir != direction &&
1117 		    attach->dir != DMA_BIDIRECTIONAL)
1118 			return ERR_PTR(-EBUSY);
1119 
1120 		return attach->sgt;
1121 	}
1122 
1123 	if (dma_buf_is_dynamic(attach->dmabuf)) {
1124 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1125 			r = attach->dmabuf->ops->pin(attach);
1126 			if (r)
1127 				return ERR_PTR(r);
1128 		}
1129 	}
1130 
1131 	sg_table = __map_dma_buf(attach, direction);
1132 	if (!sg_table)
1133 		sg_table = ERR_PTR(-ENOMEM);
1134 
1135 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1136 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1137 		attach->dmabuf->ops->unpin(attach);
1138 
1139 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1140 		attach->sgt = sg_table;
1141 		attach->dir = direction;
1142 	}
1143 
1144 #ifdef CONFIG_DMA_API_DEBUG
1145 	if (!IS_ERR(sg_table)) {
1146 		struct scatterlist *sg;
1147 		u64 addr;
1148 		int len;
1149 		int i;
1150 
1151 		for_each_sgtable_dma_sg(sg_table, sg, i) {
1152 			addr = sg_dma_address(sg);
1153 			len = sg_dma_len(sg);
1154 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1155 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1156 					 __func__, addr, len);
1157 			}
1158 		}
1159 	}
1160 #endif /* CONFIG_DMA_API_DEBUG */
1161 	return sg_table;
1162 }
1163 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
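
/*
 * Illustrative sketch of consuming the returned scatterlist (assumes the
 * reservation lock is already held; "my_device_queue_segment" is a
 * hypothetical driver helper):
 *
 *	struct sg_table *sgt;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		my_device_queue_segment(sg_dma_address(sg), sg_dma_len(sg));
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 */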
1164 
1165 /**
1166  * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1167  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1168  * dma_buf_ops.
1169  * @attach:	[in]	attachment whose scatterlist is to be returned
1170  * @direction:	[in]	direction of DMA transfer
1171  *
1172  * Unlocked variant of dma_buf_map_attachment().
1173  */
1174 struct sg_table *
1175 dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1176 				enum dma_data_direction direction)
1177 {
1178 	struct sg_table *sg_table;
1179 
1180 	might_sleep();
1181 
1182 	if (WARN_ON(!attach || !attach->dmabuf))
1183 		return ERR_PTR(-EINVAL);
1184 
1185 	dma_resv_lock(attach->dmabuf->resv, NULL);
1186 	sg_table = dma_buf_map_attachment(attach, direction);
1187 	dma_resv_unlock(attach->dmabuf->resv);
1188 
1189 	return sg_table;
1190 }
1191 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1192 
1193 /**
1194  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1195  * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1196  * dma_buf_ops.
1197  * @attach:	[in]	attachment to unmap buffer from
1198  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1199  * @direction:  [in]    direction of DMA transfer
1200  *
1201  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1202  */
1203 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1204 				struct sg_table *sg_table,
1205 				enum dma_data_direction direction)
1206 {
1207 	might_sleep();
1208 
1209 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1210 		return;
1211 
1212 	dma_resv_assert_held(attach->dmabuf->resv);
1213 
1214 	if (attach->sgt == sg_table)
1215 		return;
1216 
1217 	__unmap_dma_buf(attach, sg_table, direction);
1218 
1219 	if (dma_buf_is_dynamic(attach->dmabuf) &&
1220 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1221 		dma_buf_unpin(attach);
1222 }
1223 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1224 
1225 /**
1226  * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer;
1227  * might deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1228  * dma_buf_ops.
1229  * @attach:	[in]	attachment to unmap buffer from
1230  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1231  * @direction:	[in]	direction of DMA transfer
1232  *
1233  * Unlocked variant of dma_buf_unmap_attachment().
1234  */
1235 void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1236 				       struct sg_table *sg_table,
1237 				       enum dma_data_direction direction)
1238 {
1239 	might_sleep();
1240 
1241 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1242 		return;
1243 
1244 	dma_resv_lock(attach->dmabuf->resv, NULL);
1245 	dma_buf_unmap_attachment(attach, sg_table, direction);
1246 	dma_resv_unlock(attach->dmabuf->resv);
1247 }
1248 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1249 
1250 /**
1251  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1252  *
1253  * @dmabuf:	[in]	buffer which is moving
1254  *
1255  * Informs all attachments that they need to destroy and recreate all their
1256  * mappings.
1257  */
1258 void dma_buf_move_notify(struct dma_buf *dmabuf)
1259 {
1260 	struct dma_buf_attachment *attach;
1261 
1262 	dma_resv_assert_held(dmabuf->resv);
1263 
1264 	list_for_each_entry(attach, &dmabuf->attachments, node)
1265 		if (attach->importer_ops)
1266 			attach->importer_ops->move_notify(attach);
1267 }
1268 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
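
/*
 * Illustrative exporter-side sketch ("my_evict_buffer" is a hypothetical
 * eviction helper): notify dynamic importers before moving the backing
 * storage, all under the reservation lock.
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	dma_buf_move_notify(dmabuf);
 *	my_evict_buffer(dmabuf->priv);
 *	dma_resv_unlock(dmabuf->resv);
 */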
1269 
1270 /**
1271  * DOC: cpu access
1272  *
1273  * There are multiple reasons for supporting CPU access to a dma buffer object:
1274  *
1275  * - Fallback operations in the kernel, for example when a device is connected
1276  *   over USB and the kernel needs to shuffle the data around first before
1277  *   sending it away. Cache coherency is handled by bracketing any transactions
1278  *   with calls to dma_buf_begin_cpu_access() and
1279  *   dma_buf_end_cpu_access().
1280  *
1281  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1282  *   vmap interface is introduced. Note that on very old 32-bit architectures
1283  *   vmalloc space might be limited and result in vmap calls failing.
1284  *
1285  *   Interfaces::
1286  *
1287  *      int dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1288  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1289  *
1290  *   The vmap call can fail if there is no vmap support in the exporter, or if
1291  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1292  *   count for all vmap access and calls down into the exporter's vmap function
1293  *   only when no vmapping exists, and only unmaps it once. Protection against
1294  * concurrent vmap/vunmap calls is provided by holding the dma-buf reservation lock.
1295  *
1296  * - For full compatibility on the importer side with existing userspace
1297  *   interfaces, which might already support mmap'ing buffers. This is needed in
1298  *   many processing pipelines (e.g. feeding a software rendered image into a
1299  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1300  *   framework already supported this, and mmap support was needed for DMA
1301  *   buffer file descriptors to replace ION buffers.
1302  *
1303  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1304  *   fd. But as with kernel CPU access there's a need to bracket the actual access,
1305  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1306  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1307  *   be restarted.
1308  *
1309  *   Some systems might need some sort of cache coherency management e.g. when
1310  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1311  *   To circumvent this problem there are begin/end coherency markers that
1312  *   forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
1313  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1314  *   sequence would be used like the following:
1315  *
1316  *     - mmap dma-buf fd
1317  *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1318  *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1319  *       want (with the new data being consumed by say the GPU or the scanout
1320  *       device)
1321  *     - munmap once you don't need the buffer any more
1322  *
1323  *    For correctness and optimal performance, it is always required to use
1324  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1325  *    mapped address. Userspace cannot rely on coherent access, even when there
1326  *    are systems where it just works without calling these ioctls.
1327  *
1328  * - And as a CPU fallback in userspace processing pipelines.
1329  *
1330  *   Similar to the motivation for kernel cpu access it is again important that
1331  *   the userspace code of a given importing subsystem can use the same
1332  *   interfaces with an imported dma-buf buffer object as with a native buffer
1333  *   object. This is especially important for drm where the userspace part of
1334  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1335  *   use a different way to mmap a buffer would be rather invasive.
1336  *
1337  *   The assumption in the current dma-buf interfaces is that redirecting the
1338  *   initial mmap is all that's needed. A survey of some of the existing
1339  *   subsystems shows that no driver seems to do any nefarious thing like
1340  *   syncing up with outstanding asynchronous processing on the device or
1341  *   allocating special resources at fault time. So hopefully this is good
1342  *   enough, since adding interfaces to intercept pagefaults and allow pte
1343  *   shootdowns would increase the complexity quite a bit.
1344  *
1345  *   Interface::
1346  *
1347  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1348  *		       unsigned long);
1349  *
1350  *   If the importing subsystem simply provides a special-purpose mmap call to
1351  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1352  *   equally achieve that for a dma-buf object.
1353  */
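
/*
 * Illustrative userspace sketch (not part of this file) of the mmap plus
 * DMA_BUF_IOCTL_SYNC bracketing described above; "buf_fd" and "size" are
 * assumed to be known, and EINTR/EAGAIN restarts are omitted:
 *
 *	#include <sys/mman.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 buf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	// CPU reads/writes through ptr happen here
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, size);
 */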
1354 
1355 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1356 				      enum dma_data_direction direction)
1357 {
1358 	bool write = (direction == DMA_BIDIRECTIONAL ||
1359 		      direction == DMA_TO_DEVICE);
1360 	struct dma_resv *resv = dmabuf->resv;
1361 	long ret;
1362 
1363 	/* Wait on any implicit rendering fences */
1364 	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1365 				    true, MAX_SCHEDULE_TIMEOUT);
1366 	if (ret < 0)
1367 		return ret;
1368 
1369 	return 0;
1370 }
1371 
1372 /**
1373  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1374  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1375  * preparations. Coherency is only guaranteed in the specified range for the
1376  * specified access direction.
1377  * @dmabuf:	[in]	buffer to prepare cpu access for.
1378  * @direction:	[in]	direction of access for the cpu.
1379  *
1380  * After the cpu access is complete the caller should call
1381  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1382  * it guaranteed to be coherent with other DMA access.
1383  *
1384  * This function will also wait for any DMA transactions tracked through
1385  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1386  * synchronization this function will only ensure cache coherency, callers must
1387  * ensure synchronization with such DMA transactions on their own.
1388  *
1389  * Can return negative error values, returns 0 on success.
1390  */
1391 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1392 			     enum dma_data_direction direction)
1393 {
1394 	int ret = 0;
1395 
1396 	if (WARN_ON(!dmabuf))
1397 		return -EINVAL;
1398 
1399 	might_lock(&dmabuf->resv->lock.base);
1400 
1401 	if (dmabuf->ops->begin_cpu_access)
1402 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1403 
1404 	/* Ensure that all fences are waited upon - but we first allow
1405 	 * the native handler the chance to do so more efficiently if it
1406 	 * chooses. A double invocation here will be reasonably cheap no-op.
1407 	 * chooses. A double invocation here will be a reasonably cheap no-op.
1408 	if (ret == 0)
1409 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1410 
1411 	return ret;
1412 }
1413 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1414 
1415 /**
1416  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1417  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1418  * actions. Coherency is only guaranteed in the specified range for the
1419  * specified access direction.
1420  * @dmabuf:	[in]	buffer to complete cpu access for.
1421  * @direction:	[in]	direction of access for the cpu.
1422  *
1423  * This terminates CPU access started with dma_buf_begin_cpu_access().
1424  *
1425  * Can return negative error values, returns 0 on success.
1426  */
1427 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1428 			   enum dma_data_direction direction)
1429 {
1430 	int ret = 0;
1431 
1432 	WARN_ON(!dmabuf);
1433 
1434 	might_lock(&dmabuf->resv->lock.base);
1435 
1436 	if (dmabuf->ops->end_cpu_access)
1437 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1438 
1439 	return ret;
1440 }
1441 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
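
/*
 * Illustrative in-kernel sketch of bracketing a CPU read of the buffer
 * (assumes the backing storage is system memory, i.e. map.is_iomem is false;
 * "dst" and "len" are hypothetical, error handling trimmed):
 *
 *	struct iosys_map map;
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (!dma_buf_vmap_unlocked(dmabuf, &map)) {
 *		memcpy(dst, map.vaddr, len);
 *		dma_buf_vunmap_unlocked(dmabuf, &map);
 *	}
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */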
1442 
1443 
1444 /**
1445  * dma_buf_mmap - Set up a userspace mmap with the given vma
1446  * @dmabuf:	[in]	buffer that should back the vma
1447  * @vma:	[in]	vma for the mmap
1448  * @pgoff:	[in]	offset in pages where this mmap should start within the
1449  *			dma-buf buffer.
1450  *
1451  * This function adjusts the passed in vma so that it points at the file of the
1452  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1453  * checking on the size of the vma. Then it calls the exporter's mmap function to
1454  * set up the mapping.
1455  *
1456  * Can return negative error values, returns 0 on success.
1457  */
1458 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1459 		 unsigned long pgoff)
1460 {
1461 	int ret;
1462 
1463 	if (WARN_ON(!dmabuf || !vma))
1464 		return -EINVAL;
1465 
1466 	/* check if buffer supports mmap */
1467 	if (!dmabuf->ops->mmap)
1468 		return -EINVAL;
1469 
1470 	/* check for offset overflow */
1471 	if (pgoff + vma_pages(vma) < pgoff)
1472 		return -EOVERFLOW;
1473 
1474 	/* check for overflowing the buffer's size */
1475 	if (pgoff + vma_pages(vma) >
1476 	    dmabuf->size >> PAGE_SHIFT)
1477 		return -EINVAL;
1478 
1479 	/* readjust the vma */
1480 	vma_set_file(vma, dmabuf->file);
1481 	vma->vm_pgoff = pgoff;
1482 
1483 	dma_resv_lock(dmabuf->resv, NULL);
1484 	ret = dmabuf->ops->mmap(dmabuf, vma);
1485 	dma_resv_unlock(dmabuf->resv);
1486 
1487 	return ret;
1488 }
1489 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
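
/*
 * Illustrative sketch of an importing driver forwarding its own mmap handler
 * to the dma-buf ("struct my_buffer" and "my_drv_mmap" are hypothetical):
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_buffer *buf = file->private_data;
 *
 *		return dma_buf_mmap(buf->dmabuf, vma, 0);
 *	}
 */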
1490 
1491 /**
1492  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1493  * address space. Same restrictions as for vmap and friends apply.
1494  * @dmabuf:	[in]	buffer to vmap
1495  * @map:	[out]	returns the vmap pointer
1496  *
1497  * This call may fail due to lack of virtual mapping address space.
1498  * These calls are optional in drivers. The intended use for them
1499  * is to map objects linearly into kernel space for frequently used objects.
1500  *
1501  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1502  * dma_buf_end_cpu_access() around any cpu access performed through this
1503  * mapping.
1504  *
1505  * Returns 0 on success, or a negative errno code otherwise.
1506  */
1507 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1508 {
1509 	struct iosys_map ptr;
1510 	int ret;
1511 
1512 	iosys_map_clear(map);
1513 
1514 	if (WARN_ON(!dmabuf))
1515 		return -EINVAL;
1516 
1517 	dma_resv_assert_held(dmabuf->resv);
1518 
1519 	if (!dmabuf->ops->vmap)
1520 		return -EINVAL;
1521 
1522 	if (dmabuf->vmapping_counter) {
1523 		dmabuf->vmapping_counter++;
1524 		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1525 		*map = dmabuf->vmap_ptr;
1526 		return 0;
1527 	}
1528 
1529 	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1530 
1531 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1532 	if (WARN_ON_ONCE(ret))
1533 		return ret;
1534 
1535 	dmabuf->vmap_ptr = ptr;
1536 	dmabuf->vmapping_counter = 1;
1537 
1538 	*map = dmabuf->vmap_ptr;
1539 
1540 	return 0;
1541 }
1542 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1543 
1544 /**
1545  * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1546  * address space. Same restrictions as for vmap and friends apply.
1547  * @dmabuf:	[in]	buffer to vmap
1548  * @map:	[out]	returns the vmap pointer
1549  *
1550  * Unlocked version of dma_buf_vmap()
1551  *
1552  * Returns 0 on success, or a negative errno code otherwise.
1553  */
1554 int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1555 {
1556 	int ret;
1557 
1558 	iosys_map_clear(map);
1559 
1560 	if (WARN_ON(!dmabuf))
1561 		return -EINVAL;
1562 
1563 	dma_resv_lock(dmabuf->resv, NULL);
1564 	ret = dma_buf_vmap(dmabuf, map);
1565 	dma_resv_unlock(dmabuf->resv);
1566 
1567 	return ret;
1568 }
1569 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
1570 
1571 /**
1572  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1573  * @dmabuf:	[in]	buffer to vunmap
1574  * @map:	[in]	vmap pointer to vunmap
1575  */
1576 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1577 {
1578 	if (WARN_ON(!dmabuf))
1579 		return;
1580 
1581 	dma_resv_assert_held(dmabuf->resv);
1582 
1583 	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1584 	BUG_ON(dmabuf->vmapping_counter == 0);
1585 	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1586 
1587 	if (--dmabuf->vmapping_counter == 0) {
1588 		if (dmabuf->ops->vunmap)
1589 			dmabuf->ops->vunmap(dmabuf, map);
1590 		iosys_map_clear(&dmabuf->vmap_ptr);
1591 	}
1592 }
1593 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1594 
1595 /**
1596  * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1597  * @dmabuf:	[in]	buffer to vunmap
1598  * @map:	[in]	vmap pointer to vunmap
1599  */
1600 void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1601 {
1602 	if (WARN_ON(!dmabuf))
1603 		return;
1604 
1605 	dma_resv_lock(dmabuf->resv, NULL);
1606 	dma_buf_vunmap(dmabuf, map);
1607 	dma_resv_unlock(dmabuf->resv);
1608 }
1609 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
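
/*
 * Illustrative sketch contrasting the locked and unlocked vmap paths: a
 * caller that already holds the reservation lock uses dma_buf_vmap() and
 * dma_buf_vunmap(), everyone else uses the _unlocked variants:
 *
 *	struct iosys_map map;
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	if (!dma_buf_vmap(dmabuf, &map)) {
 *		// ... access the buffer through map.vaddr ...
 *		dma_buf_vunmap(dmabuf, &map);
 *	}
 *	dma_resv_unlock(dmabuf->resv);
 */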
1610 
1611 #ifdef CONFIG_DEBUG_FS
1612 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1613 {
1614 	struct dma_buf *buf_obj;
1615 	struct dma_buf_attachment *attach_obj;
1616 	int count = 0, attach_count;
1617 	size_t size = 0;
1618 	int ret;
1619 
1620 	ret = mutex_lock_interruptible(&db_list.lock);
1621 
1622 	if (ret)
1623 		return ret;
1624 
1625 	seq_puts(s, "\nDma-buf Objects:\n");
1626 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1627 		   "size", "flags", "mode", "count", "ino");
1628 
1629 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1630 
1631 		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1632 		if (ret)
1633 			goto error_unlock;
1634 
1635 
1636 		spin_lock(&buf_obj->name_lock);
1637 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1638 				buf_obj->size,
1639 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1640 				file_count(buf_obj->file),
1641 				buf_obj->exp_name,
1642 				file_inode(buf_obj->file)->i_ino,
1643 				buf_obj->name ?: "<none>");
1644 		spin_unlock(&buf_obj->name_lock);
1645 
1646 		dma_resv_describe(buf_obj->resv, s);
1647 
1648 		seq_puts(s, "\tAttached Devices:\n");
1649 		attach_count = 0;
1650 
1651 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1652 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1653 			attach_count++;
1654 		}
1655 		dma_resv_unlock(buf_obj->resv);
1656 
1657 		seq_printf(s, "Total %d devices attached\n\n",
1658 				attach_count);
1659 
1660 		count++;
1661 		size += buf_obj->size;
1662 	}
1663 
1664 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1665 
1666 	mutex_unlock(&db_list.lock);
1667 	return 0;
1668 
1669 error_unlock:
1670 	mutex_unlock(&db_list.lock);
1671 	return ret;
1672 }
1673 
1674 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1675 
1676 static struct dentry *dma_buf_debugfs_dir;
1677 
1678 static int dma_buf_init_debugfs(void)
1679 {
1680 	struct dentry *d;
1681 	int err = 0;
1682 
1683 	d = debugfs_create_dir("dma_buf", NULL);
1684 	if (IS_ERR(d))
1685 		return PTR_ERR(d);
1686 
1687 	dma_buf_debugfs_dir = d;
1688 
1689 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1690 				NULL, &dma_buf_debug_fops);
1691 	if (IS_ERR(d)) {
1692 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1693 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1694 		dma_buf_debugfs_dir = NULL;
1695 		err = PTR_ERR(d);
1696 	}
1697 
1698 	return err;
1699 }
1700 
1701 static void dma_buf_uninit_debugfs(void)
1702 {
1703 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1704 }
1705 #else
1706 static inline int dma_buf_init_debugfs(void)
1707 {
1708 	return 0;
1709 }
1710 static inline void dma_buf_uninit_debugfs(void)
1711 {
1712 }
1713 #endif
1714 
1715 static int __init dma_buf_init(void)
1716 {
1717 	int ret;
1718 
1719 	ret = dma_buf_init_sysfs_statistics();
1720 	if (ret)
1721 		return ret;
1722 
1723 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1724 	if (IS_ERR(dma_buf_mnt))
1725 		return PTR_ERR(dma_buf_mnt);
1726 
1727 	mutex_init(&db_list.lock);
1728 	INIT_LIST_HEAD(&db_list.head);
1729 	dma_buf_init_debugfs();
1730 	return 0;
1731 }
1732 subsys_initcall(dma_buf_init);
1733 
1734 static void __exit dma_buf_deinit(void)
1735 {
1736 	dma_buf_uninit_debugfs();
1737 	kern_unmount(dma_buf_mnt);
1738 	dma_buf_uninit_sysfs_statistics();
1739 }
1740 __exitcall(dma_buf_deinit);
1741