// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/*
	 * Only support discovering the end of the buffer, but also
	 * allow SEEK_SET to maintain the idiomatic SEEK_END(0),
	 * SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
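 *
 * A minimal userspace sketch of this (illustrative only; it assumes a valid
 * dma-buf fd in ``dmabuf_fd`` and elides error handling)::
 *
 *      struct pollfd pfd = {
 *          .fd = dmabuf_fd,
 *          .events = POLLIN,   // wait for the most recent write to complete
 *      };
 *
 *      poll(&pfd, 1, -1);      // blocks until the exclusive fence signals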
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
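 * A userspace sketch of the corresponding ioctl (illustrative only; it
 * assumes a valid dma-buf fd and elides error handling)::
 *
 *      char name[DMA_BUF_NAME_LEN] = "camera-preview";
 *
 *      // must be issued before the buffer is attached to any device
 *      ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *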
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
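 *
 * A condensed importer-side sketch of steps 2-4 (illustrative only; the fd,
 * the device pointer and the omitted error handling are assumptions of this
 * example)::
 *
 *      struct dma_buf \*dmabuf = dma_buf_get(fd);
 *      struct dma_buf_attachment \*attach = dma_buf_attach(dmabuf, dev);
 *      struct sg_table \*sgt;
 *
 *      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *      // ... program the device with the DMA addresses in sgt ...
 *      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *
 *      dma_buf_detach(dmabuf, attach);
 *      dma_buf_put(dmabuf);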
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. If the required
 * ops are missing, or if allocating the struct dma_buf fails, a negative
 * error is returned.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
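 *
 * A minimal exporter-side sketch (illustrative; ``my_dmabuf_ops`` and
 * ``my_buffer`` are assumptions of this example, not part of this file)::
 *
 *      DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *      struct dma_buf \*dmabuf;
 *
 *      exp_info.ops = &my_dmabuf_ops;  // must provide map/unmap/release
 *      exp_info.size = my_buffer->size;
 *      exp_info.flags = O_CLOEXEC;
 *      exp_info.priv = my_buffer;
 *
 *      dmabuf = dma_buf_export(&exp_info);
 *      if (IS_ERR(dmabuf))
 *          return PTR_ERR(dmabuf);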
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses the
 * file's refcounting done by fget() to increase the refcount. Returns an
 * ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/*
	 * To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif
}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
				      enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	if (!IS_ERR_OR_NULL(sg_table))
		mangle_sg_table(sg_table);

	return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
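 *
 * A sketch of a dynamic importer's ops (illustrative; ``my_move_notify`` is
 * an assumption of this example and must invalidate any cached mappings)::
 *
 *      static const struct dma_buf_attach_ops my_importer_ops = {
 *          .allow_peer2peer = true,
 *          .move_notify = my_move_notify,
 *      };
 *
 *      attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, priv);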
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dma_buf_pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_buf_unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_buf_unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = dma_buf_pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this, and mmap support was needed for DMA
 *   buffer file descriptors to replace ION buffers.
 *
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 *   fd. But as with kernel CPU access, the actual access needs to be bracketed,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (see the sketch at the end of
 *   this section):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in the CPU: 1. SYNC_START ioctl,
 *       2. read/write to the mmap area, 3. SYNC_END ioctl. This can be repeated
 *       as often as you want (with the new data being consumed by say the GPU
 *       or the scanout device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
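 *
 *   A sketch of the bracketing described above (illustrative only; the mmap
 *   setup and error handling are elided)::
 *
 *      struct dma_buf_sync sync;
 *
 *      sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *      ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *      memcpy(mapped, src, len);       // CPU access through the mmap
 *
 *      sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *      ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);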
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);


/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
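 *
 * A sketch of an importing driver's mmap handler forwarding to this helper
 * (illustrative; ``struct my_obj`` and its lookup are assumptions of this
 * example)::
 *
 *      static int my_driver_mmap(struct file \*file, struct vm_area_struct \*vma)
 *      {
 *          struct my_obj \*obj = file->private_data;
 *
 *          // map the whole buffer, starting at page offset 0
 *          return dma_buf_mmap(obj->dmabuf, vma, 0);
 *      }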
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly into kernel space for frequently
 * accessed objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
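 *
 * A usage sketch bracketing the CPU access (illustrative only; error handling
 * is abbreviated)::
 *
 *      struct dma_buf_map map;
 *
 *      if (dma_buf_vmap(dmabuf, &map))
 *          return -ENOMEM;
 *
 *      dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *      // CPU reads through map.vaddr (map.vaddr_iomem for I/O memory)
 *      dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *      dma_buf_vunmap(dmabuf, &map);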
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_map ptr;
	int ret = 0;

	dma_buf_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);