xref: /openbmc/linux/drivers/dma-buf/dma-buf.c (revision 9e255e2b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to the linaro-mm-sig list, and especially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/anon_inodes.h>
19 #include <linux/export.h>
20 #include <linux/debugfs.h>
21 #include <linux/module.h>
22 #include <linux/seq_file.h>
23 #include <linux/poll.h>
24 #include <linux/dma-resv.h>
25 #include <linux/mm.h>
26 #include <linux/mount.h>
27 #include <linux/pseudo_fs.h>
28 
29 #include <uapi/linux/dma-buf.h>
30 #include <uapi/linux/magic.h>
31 
32 static inline int is_dma_buf_file(struct file *);
33 
34 struct dma_buf_list {
35 	struct list_head head;
36 	struct mutex lock;
37 };
38 
39 static struct dma_buf_list db_list;
40 
41 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
42 {
43 	struct dma_buf *dmabuf;
44 	char name[DMA_BUF_NAME_LEN];
45 	size_t ret = 0;
46 
47 	dmabuf = dentry->d_fsdata;
48 	spin_lock(&dmabuf->name_lock);
49 	if (dmabuf->name)
50 		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
51 	spin_unlock(&dmabuf->name_lock);
52 
53 	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
54 			     dentry->d_name.name, ret > 0 ? name : "");
55 }
56 
57 static void dma_buf_release(struct dentry *dentry)
58 {
59 	struct dma_buf *dmabuf;
60 
61 	dmabuf = dentry->d_fsdata;
62 	if (unlikely(!dmabuf))
63 		return;
64 
65 	BUG_ON(dmabuf->vmapping_counter);
66 
67 	/*
68 	 * Any fences that a dma-buf poll can wait on should be signaled
69 	 * before releasing dma-buf. This is the responsibility of each
70 	 * driver that uses the reservation objects.
71 	 *
72 	 * If you hit this BUG() it means someone dropped their ref to the
73 	 * dma-buf while still having pending operations on the buffer.
74 	 */
75 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
76 
77 	dmabuf->ops->release(dmabuf);
78 
79 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
80 		dma_resv_fini(dmabuf->resv);
81 
82 	module_put(dmabuf->owner);
83 	kfree(dmabuf->name);
84 	kfree(dmabuf);
85 }
86 
87 static int dma_buf_file_release(struct inode *inode, struct file *file)
88 {
89 	struct dma_buf *dmabuf;
90 
91 	if (!is_dma_buf_file(file))
92 		return -EINVAL;
93 
94 	dmabuf = file->private_data;
95 
96 	mutex_lock(&db_list.lock);
97 	list_del(&dmabuf->list_node);
98 	mutex_unlock(&db_list.lock);
99 
100 	return 0;
101 }
102 
103 static const struct dentry_operations dma_buf_dentry_ops = {
104 	.d_dname = dmabuffs_dname,
105 	.d_release = dma_buf_release,
106 };
107 
108 static struct vfsmount *dma_buf_mnt;
109 
110 static int dma_buf_fs_init_context(struct fs_context *fc)
111 {
112 	struct pseudo_fs_context *ctx;
113 
114 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
115 	if (!ctx)
116 		return -ENOMEM;
117 	ctx->dops = &dma_buf_dentry_ops;
118 	return 0;
119 }
120 
121 static struct file_system_type dma_buf_fs_type = {
122 	.name = "dmabuf",
123 	.init_fs_context = dma_buf_fs_init_context,
124 	.kill_sb = kill_anon_super,
125 };
126 
127 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
128 {
129 	struct dma_buf *dmabuf;
130 
131 	if (!is_dma_buf_file(file))
132 		return -EINVAL;
133 
134 	dmabuf = file->private_data;
135 
136 	/* check if buffer supports mmap */
137 	if (!dmabuf->ops->mmap)
138 		return -EINVAL;
139 
140 	/* check for overflowing the buffer's size */
141 	if (vma->vm_pgoff + vma_pages(vma) >
142 	    dmabuf->size >> PAGE_SHIFT)
143 		return -EINVAL;
144 
145 	return dmabuf->ops->mmap(dmabuf, vma);
146 }
147 
148 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
149 {
150 	struct dma_buf *dmabuf;
151 	loff_t base;
152 
153 	if (!is_dma_buf_file(file))
154 		return -EBADF;
155 
156 	dmabuf = file->private_data;
157 
158 	/* only support discovering the end of the buffer,
159 	   but also allow SEEK_SET(0) to maintain the idiomatic
160 	   SEEK_END(0), SEEK_SET(0) size-discovery pattern */
161 	if (whence == SEEK_END)
162 		base = dmabuf->size;
163 	else if (whence == SEEK_SET)
164 		base = 0;
165 	else
166 		return -EINVAL;
167 
168 	if (offset != 0)
169 		return -EINVAL;
170 
171 	return base + offset;
172 }
173 
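/*
 * A minimal userspace sketch of the size-discovery pattern supported above,
 * assuming dmabuf_fd is a dma-buf file descriptor obtained elsewhere:
 *
 *	off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *	lseek(dmabuf_fd, 0, SEEK_SET);
 *
 * Only offset 0 is accepted; any other offset, and SEEK_CUR, returns -EINVAL.
 */
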
174 /**
175  * DOC: implicit fence polling
176  *
177  * To support cross-device and cross-driver synchronization of buffer access,
178  * implicit fences (represented internally in the kernel with &struct dma_fence)
179  * can be attached to a &dma_buf. The glue for that and a few related things is
180  * provided in the &dma_resv structure.
181  *
182  * Userspace can query the state of these implicitly tracked fences using poll()
183  * and related system calls:
184  *
185  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
186  *   most recent write or exclusive fence.
187  *
188  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
189  *   all attached fences, shared and exclusive ones.
190  *
191  * Note that this only signals the completion of the respective fences, i.e. the
192  * DMA transfers are complete. Cache flushing and any other necessary
193  * preparations before CPU access can begin still need to happen.
194  */
195 
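/*
 * A minimal userspace sketch of the polling described above, assuming
 * dmabuf_fd is a dma-buf file descriptor: block until the most recent
 * write/exclusive fence has signaled before reading the buffer with the CPU.
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *
 * Cache maintenance (e.g. DMA_BUF_IOCTL_SYNC) is still required before the
 * CPU access itself, as noted above.
 */
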
196 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
197 {
198 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
199 	unsigned long flags;
200 
201 	spin_lock_irqsave(&dcb->poll->lock, flags);
202 	wake_up_locked_poll(dcb->poll, dcb->active);
203 	dcb->active = 0;
204 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
205 }
206 
207 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
208 {
209 	struct dma_buf *dmabuf;
210 	struct dma_resv *resv;
211 	struct dma_resv_list *fobj;
212 	struct dma_fence *fence_excl;
213 	__poll_t events;
214 	unsigned shared_count, seq;
215 
216 	dmabuf = file->private_data;
217 	if (!dmabuf || !dmabuf->resv)
218 		return EPOLLERR;
219 
220 	resv = dmabuf->resv;
221 
222 	poll_wait(file, &dmabuf->poll, poll);
223 
224 	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
225 	if (!events)
226 		return 0;
227 
228 retry:
229 	seq = read_seqcount_begin(&resv->seq);
230 	rcu_read_lock();
231 
232 	fobj = rcu_dereference(resv->fence);
233 	if (fobj)
234 		shared_count = fobj->shared_count;
235 	else
236 		shared_count = 0;
237 	fence_excl = rcu_dereference(resv->fence_excl);
238 	if (read_seqcount_retry(&resv->seq, seq)) {
239 		rcu_read_unlock();
240 		goto retry;
241 	}
242 
243 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
244 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
245 		__poll_t pevents = EPOLLIN;
246 
247 		if (shared_count == 0)
248 			pevents |= EPOLLOUT;
249 
250 		spin_lock_irq(&dmabuf->poll.lock);
251 		if (dcb->active) {
252 			dcb->active |= pevents;
253 			events &= ~pevents;
254 		} else
255 			dcb->active = pevents;
256 		spin_unlock_irq(&dmabuf->poll.lock);
257 
258 		if (events & pevents) {
259 			if (!dma_fence_get_rcu(fence_excl)) {
260 				/* force a recheck */
261 				events &= ~pevents;
262 				dma_buf_poll_cb(NULL, &dcb->cb);
263 			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
264 							   dma_buf_poll_cb)) {
265 				events &= ~pevents;
266 				dma_fence_put(fence_excl);
267 			} else {
268 				/*
269 				 * No callback queued, wake up any additional
270 				 * waiters.
271 				 */
272 				dma_fence_put(fence_excl);
273 				dma_buf_poll_cb(NULL, &dcb->cb);
274 			}
275 		}
276 	}
277 
278 	if ((events & EPOLLOUT) && shared_count > 0) {
279 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
280 		int i;
281 
282 		/* Only queue a new callback if no event has fired yet */
283 		spin_lock_irq(&dmabuf->poll.lock);
284 		if (dcb->active)
285 			events &= ~EPOLLOUT;
286 		else
287 			dcb->active = EPOLLOUT;
288 		spin_unlock_irq(&dmabuf->poll.lock);
289 
290 		if (!(events & EPOLLOUT))
291 			goto out;
292 
293 		for (i = 0; i < shared_count; ++i) {
294 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
295 
296 			if (!dma_fence_get_rcu(fence)) {
297 				/*
298 				 * fence refcount dropped to zero, this means
299 				 * that fobj has been freed
300 				 *
301 				 * call dma_buf_poll_cb and force a recheck!
302 				 */
303 				events &= ~EPOLLOUT;
304 				dma_buf_poll_cb(NULL, &dcb->cb);
305 				break;
306 			}
307 			if (!dma_fence_add_callback(fence, &dcb->cb,
308 						    dma_buf_poll_cb)) {
309 				dma_fence_put(fence);
310 				events &= ~EPOLLOUT;
311 				break;
312 			}
313 			dma_fence_put(fence);
314 		}
315 
316 		/* No callback queued, wake up any additional waiters. */
317 		if (i == shared_count)
318 			dma_buf_poll_cb(NULL, &dcb->cb);
319 	}
320 
321 out:
322 	rcu_read_unlock();
323 	return events;
324 }
325 
326 /**
327  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
328  * The name of the dma-buf buffer can only be set when the dma-buf is not
329  * attached to any devices. It could theoretically support changing the
330  * name of the dma-buf if the same piece of memory is used for multiple
331  * purposes by different devices.
332  *
333  * @dmabuf: [in]     dmabuf buffer that will be renamed.
334  * @buf:    [in]     A piece of userspace memory that contains the name of
335  *                   the dma-buf.
336  *
337  * Returns 0 on success. If the dma-buf buffer is already attached to
338  * devices, -EBUSY is returned.
339  *
340  */
341 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
342 {
343 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
344 	long ret = 0;
345 
346 	if (IS_ERR(name))
347 		return PTR_ERR(name);
348 
349 	dma_resv_lock(dmabuf->resv, NULL);
350 	if (!list_empty(&dmabuf->attachments)) {
351 		ret = -EBUSY;
352 		kfree(name);
353 		goto out_unlock;
354 	}
355 	spin_lock(&dmabuf->name_lock);
356 	kfree(dmabuf->name);
357 	dmabuf->name = name;
358 	spin_unlock(&dmabuf->name_lock);
359 
360 out_unlock:
361 	dma_resv_unlock(dmabuf->resv);
362 	return ret;
363 }
364 
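/*
 * Userspace reaches this through the DMA_BUF_SET_NAME ioctl; a sketch,
 * assuming dmabuf_fd is a dma-buf file descriptor with no attachments yet
 * and "my-buffer" is an arbitrary name chosen by the application:
 *
 *	ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "my-buffer");
 */
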
365 static long dma_buf_ioctl(struct file *file,
366 			  unsigned int cmd, unsigned long arg)
367 {
368 	struct dma_buf *dmabuf;
369 	struct dma_buf_sync sync;
370 	enum dma_data_direction direction;
371 	int ret;
372 
373 	dmabuf = file->private_data;
374 
375 	switch (cmd) {
376 	case DMA_BUF_IOCTL_SYNC:
377 		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
378 			return -EFAULT;
379 
380 		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
381 			return -EINVAL;
382 
383 		switch (sync.flags & DMA_BUF_SYNC_RW) {
384 		case DMA_BUF_SYNC_READ:
385 			direction = DMA_FROM_DEVICE;
386 			break;
387 		case DMA_BUF_SYNC_WRITE:
388 			direction = DMA_TO_DEVICE;
389 			break;
390 		case DMA_BUF_SYNC_RW:
391 			direction = DMA_BIDIRECTIONAL;
392 			break;
393 		default:
394 			return -EINVAL;
395 		}
396 
397 		if (sync.flags & DMA_BUF_SYNC_END)
398 			ret = dma_buf_end_cpu_access(dmabuf, direction);
399 		else
400 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
401 
402 		return ret;
403 
404 	case DMA_BUF_SET_NAME_A:
405 	case DMA_BUF_SET_NAME_B:
406 		return dma_buf_set_name(dmabuf, (const char __user *)arg);
407 
408 	default:
409 		return -ENOTTY;
410 	}
411 }
412 
413 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
414 {
415 	struct dma_buf *dmabuf = file->private_data;
416 
417 	seq_printf(m, "size:\t%zu\n", dmabuf->size);
418 	/* Don't count the temporary reference taken inside procfs seq_show */
419 	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
420 	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
421 	spin_lock(&dmabuf->name_lock);
422 	if (dmabuf->name)
423 		seq_printf(m, "name:\t%s\n", dmabuf->name);
424 	spin_unlock(&dmabuf->name_lock);
425 }
426 
427 static const struct file_operations dma_buf_fops = {
428 	.release	= dma_buf_file_release,
429 	.mmap		= dma_buf_mmap_internal,
430 	.llseek		= dma_buf_llseek,
431 	.poll		= dma_buf_poll,
432 	.unlocked_ioctl	= dma_buf_ioctl,
433 	.compat_ioctl	= compat_ptr_ioctl,
434 	.show_fdinfo	= dma_buf_show_fdinfo,
435 };
436 
437 /*
438  * is_dma_buf_file - Check if struct file* is associated with dma_buf
439  */
440 static inline int is_dma_buf_file(struct file *file)
441 {
442 	return file->f_op == &dma_buf_fops;
443 }
444 
445 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
446 {
447 	struct file *file;
448 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
449 
450 	if (IS_ERR(inode))
451 		return ERR_CAST(inode);
452 
453 	inode->i_size = dmabuf->size;
454 	inode_set_bytes(inode, dmabuf->size);
455 
456 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
457 				 flags, &dma_buf_fops);
458 	if (IS_ERR(file))
459 		goto err_alloc_file;
460 	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
461 	file->private_data = dmabuf;
462 	file->f_path.dentry->d_fsdata = dmabuf;
463 
464 	return file;
465 
466 err_alloc_file:
467 	iput(inode);
468 	return file;
469 }
470 
471 /**
472  * DOC: dma buf device access
473  *
474  * For device DMA access to a shared DMA buffer the usual sequence of operations
475  * is fairly simple:
476  *
477  * 1. The exporter defines its exporter instance using
478  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
479  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
480  *    as a file descriptor by calling dma_buf_fd().
481  *
482  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
483  *    to share with: first the file descriptor is converted to a &dma_buf using
484  *    dma_buf_get(). Then the buffer is attached to the device using
485  *    dma_buf_attach().
486  *
487  *    Up to this stage the exporter is still free to migrate or reallocate the
488  *    backing storage.
489  *
490  * 3. Once the buffer is attached to all devices userspace can initiate DMA
491  *    access to the shared buffer. In the kernel this is done by calling
492  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
493  *
494  * 4. Once a driver is done with a shared buffer it needs to call
495  *    dma_buf_detach() (after cleaning up any mappings) and then release the
496  *    reference acquired with dma_buf_get() by calling dma_buf_put().
497  *
498  * For the detailed semantics exporters are expected to implement see
499  * &dma_buf_ops.
500  */
501 
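/*
 * A condensed kernel-side sketch of steps 2-4 above (error handling trimmed;
 * "fd" is the file descriptor received from userspace and "dev" stands in
 * for the importing &struct device):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	... program the device to DMA to/from the pages described by sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
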
502 /**
503  * dma_buf_export - Creates a new dma_buf, and associates an anon file
504  * with this buffer, so it can be exported.
505  * Also connect the allocator-specific data and ops to the buffer.
506  * Additionally, provide a name string for the exporter; useful in debugging.
507  *
508  * @exp_info:	[in]	holds all the export related information provided
509  *			by the exporter. see &struct dma_buf_export_info
510  *			for further details.
511  *
512  * Returns, on success, a newly created struct dma_buf object, which wraps the
513  * supplied private data and operations for &struct dma_buf_ops. If the required
514  * ops are missing, or the struct dma_buf allocation fails, a negative error is
515  * returned, wrapped in an ERR_PTR.
516  *
517  * For most cases the easiest way to create @exp_info is through the
518  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
519  */
520 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
521 {
522 	struct dma_buf *dmabuf;
523 	struct dma_resv *resv = exp_info->resv;
524 	struct file *file;
525 	size_t alloc_size = sizeof(struct dma_buf);
526 	int ret;
527 
528 	if (!exp_info->resv)
529 		alloc_size += sizeof(struct dma_resv);
530 	else
531 		/* prevent &dma_buf[1] == dma_buf->resv */
532 		alloc_size += 1;
533 
534 	if (WARN_ON(!exp_info->priv
535 			  || !exp_info->ops
536 			  || !exp_info->ops->map_dma_buf
537 			  || !exp_info->ops->unmap_dma_buf
538 			  || !exp_info->ops->release)) {
539 		return ERR_PTR(-EINVAL);
540 	}
541 
542 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
543 		    (exp_info->ops->pin || exp_info->ops->unpin)))
544 		return ERR_PTR(-EINVAL);
545 
546 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
547 		return ERR_PTR(-EINVAL);
548 
549 	if (!try_module_get(exp_info->owner))
550 		return ERR_PTR(-ENOENT);
551 
552 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
553 	if (!dmabuf) {
554 		ret = -ENOMEM;
555 		goto err_module;
556 	}
557 
558 	dmabuf->priv = exp_info->priv;
559 	dmabuf->ops = exp_info->ops;
560 	dmabuf->size = exp_info->size;
561 	dmabuf->exp_name = exp_info->exp_name;
562 	dmabuf->owner = exp_info->owner;
563 	spin_lock_init(&dmabuf->name_lock);
564 	init_waitqueue_head(&dmabuf->poll);
565 	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
566 	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
567 
568 	if (!resv) {
569 		resv = (struct dma_resv *)&dmabuf[1];
570 		dma_resv_init(resv);
571 	}
572 	dmabuf->resv = resv;
573 
574 	file = dma_buf_getfile(dmabuf, exp_info->flags);
575 	if (IS_ERR(file)) {
576 		ret = PTR_ERR(file);
577 		goto err_dmabuf;
578 	}
579 
580 	file->f_mode |= FMODE_LSEEK;
581 	dmabuf->file = file;
582 
583 	mutex_init(&dmabuf->lock);
584 	INIT_LIST_HEAD(&dmabuf->attachments);
585 
586 	mutex_lock(&db_list.lock);
587 	list_add(&dmabuf->list_node, &db_list.head);
588 	mutex_unlock(&db_list.lock);
589 
590 	return dmabuf;
591 
592 err_dmabuf:
593 	kfree(dmabuf);
594 err_module:
595 	module_put(exp_info->owner);
596 	return ERR_PTR(ret);
597 }
598 EXPORT_SYMBOL_GPL(dma_buf_export);
599 
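/*
 * A minimal exporter-side sketch (error handling trimmed); my_ops, my_priv
 * and my_size are hypothetical exporter-specific names, and my_ops must
 * provide at least map_dma_buf, unmap_dma_buf and release:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_ops;
 *	exp_info.size = my_size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_priv;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	return dma_buf_fd(dmabuf, O_CLOEXEC);
 */
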
600 /**
601  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
602  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
603  * @flags:      [in]    flags to give to fd
604  *
605  * On success, returns an associated 'fd'. Else, returns error.
606  */
607 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
608 {
609 	int fd;
610 
611 	if (!dmabuf || !dmabuf->file)
612 		return -EINVAL;
613 
614 	fd = get_unused_fd_flags(flags);
615 	if (fd < 0)
616 		return fd;
617 
618 	fd_install(fd, dmabuf->file);
619 
620 	return fd;
621 }
622 EXPORT_SYMBOL_GPL(dma_buf_fd);
623 
624 /**
625  * dma_buf_get - returns the struct dma_buf related to an fd
626  * @fd:	[in]	fd associated with the struct dma_buf to be returned
627  *
628  * On success, returns the struct dma_buf associated with an fd; uses
629  * the file's refcounting done by fget() to increase the refcount. Returns an
630  * ERR_PTR otherwise.
631  */
632 struct dma_buf *dma_buf_get(int fd)
633 {
634 	struct file *file;
635 
636 	file = fget(fd);
637 
638 	if (!file)
639 		return ERR_PTR(-EBADF);
640 
641 	if (!is_dma_buf_file(file)) {
642 		fput(file);
643 		return ERR_PTR(-EINVAL);
644 	}
645 
646 	return file->private_data;
647 }
648 EXPORT_SYMBOL_GPL(dma_buf_get);
649 
650 /**
651  * dma_buf_put - decreases refcount of the buffer
652  * @dmabuf:	[in]	buffer to reduce refcount of
653  *
654  * Uses file's refcounting done implicitly by fput().
655  *
656  * If, as a result of this call, the refcount becomes 0, the 'release' file
657  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
658  * in turn, and frees the memory allocated for dmabuf when exported.
659  */
660 void dma_buf_put(struct dma_buf *dmabuf)
661 {
662 	if (WARN_ON(!dmabuf || !dmabuf->file))
663 		return;
664 
665 	fput(dmabuf->file);
666 }
667 EXPORT_SYMBOL_GPL(dma_buf_put);
668 
669 static void mangle_sg_table(struct sg_table *sg_table)
670 {
671 #ifdef CONFIG_DMABUF_DEBUG
672 	int i;
673 	struct scatterlist *sg;
674 
675 	/* To catch abuse of the underlying struct page by importers mix
676 	 * up the bits, but take care to preserve the low SG_ bits to
677 	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
678 	 * before passing the sgt back to the exporter. */
679 	for_each_sgtable_sg(sg_table, sg, i)
680 		sg->page_link ^= ~0xffUL;
681 #endif
682 
683 }
684 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
685 				       enum dma_data_direction direction)
686 {
687 	struct sg_table *sg_table;
688 
689 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
690 
691 	if (!IS_ERR_OR_NULL(sg_table))
692 		mangle_sg_table(sg_table);
693 
694 	return sg_table;
695 }
696 
697 /**
698  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
699  * @dmabuf:		[in]	buffer to attach device to.
700  * @dev:		[in]	device to be attached.
701  * @importer_ops:	[in]	importer operations for the attachment
702  * @importer_priv:	[in]	importer private pointer for the attachment
703  *
704  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
705  * must be cleaned up by calling dma_buf_detach().
706  *
707  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
708  * functionality.
709  *
710  * Returns:
711  *
712  * A pointer to newly created &dma_buf_attachment on success, or a negative
713  * error code wrapped into a pointer on failure.
714  *
715  * Note that this can fail if the backing storage of @dmabuf is in a place not
716  * accessible to @dev, and cannot be moved to a more suitable place. This is
717  * indicated with the error code -EBUSY.
718  */
719 struct dma_buf_attachment *
720 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
721 		       const struct dma_buf_attach_ops *importer_ops,
722 		       void *importer_priv)
723 {
724 	struct dma_buf_attachment *attach;
725 	int ret;
726 
727 	if (WARN_ON(!dmabuf || !dev))
728 		return ERR_PTR(-EINVAL);
729 
730 	if (WARN_ON(importer_ops && !importer_ops->move_notify))
731 		return ERR_PTR(-EINVAL);
732 
733 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
734 	if (!attach)
735 		return ERR_PTR(-ENOMEM);
736 
737 	attach->dev = dev;
738 	attach->dmabuf = dmabuf;
739 	if (importer_ops)
740 		attach->peer2peer = importer_ops->allow_peer2peer;
741 	attach->importer_ops = importer_ops;
742 	attach->importer_priv = importer_priv;
743 
744 	if (dmabuf->ops->attach) {
745 		ret = dmabuf->ops->attach(dmabuf, attach);
746 		if (ret)
747 			goto err_attach;
748 	}
749 	dma_resv_lock(dmabuf->resv, NULL);
750 	list_add(&attach->node, &dmabuf->attachments);
751 	dma_resv_unlock(dmabuf->resv);
752 
753 	/* When either the importer or the exporter can't handle dynamic
754 	 * mappings we cache the mapping here to avoid issues with the
755 	 * reservation object lock.
756 	 */
757 	if (dma_buf_attachment_is_dynamic(attach) !=
758 	    dma_buf_is_dynamic(dmabuf)) {
759 		struct sg_table *sgt;
760 
761 		if (dma_buf_is_dynamic(attach->dmabuf)) {
762 			dma_resv_lock(attach->dmabuf->resv, NULL);
763 			ret = dma_buf_pin(attach);
764 			if (ret)
765 				goto err_unlock;
766 		}
767 
768 		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
769 		if (!sgt)
770 			sgt = ERR_PTR(-ENOMEM);
771 		if (IS_ERR(sgt)) {
772 			ret = PTR_ERR(sgt);
773 			goto err_unpin;
774 		}
775 		if (dma_buf_is_dynamic(attach->dmabuf))
776 			dma_resv_unlock(attach->dmabuf->resv);
777 		attach->sgt = sgt;
778 		attach->dir = DMA_BIDIRECTIONAL;
779 	}
780 
781 	return attach;
782 
783 err_attach:
784 	kfree(attach);
785 	return ERR_PTR(ret);
786 
787 err_unpin:
788 	if (dma_buf_is_dynamic(attach->dmabuf))
789 		dma_buf_unpin(attach);
790 
791 err_unlock:
792 	if (dma_buf_is_dynamic(attach->dmabuf))
793 		dma_resv_unlock(attach->dmabuf->resv);
794 
795 	dma_buf_detach(dmabuf, attach);
796 	return ERR_PTR(ret);
797 }
798 EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
799 
800 /**
801  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
802  * @dmabuf:	[in]	buffer to attach device to.
803  * @dev:	[in]	device to be attached.
804  *
805  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
806  * mapping.
807  */
808 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
809 					  struct device *dev)
810 {
811 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
812 }
813 EXPORT_SYMBOL_GPL(dma_buf_attach);
814 
815 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
816 			    struct sg_table *sg_table,
817 			    enum dma_data_direction direction)
818 {
819 	/* uses XOR, hence this unmangles */
820 	mangle_sg_table(sg_table);
821 
822 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
823 }
824 
825 /**
826  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
827  * @dmabuf:	[in]	buffer to detach from.
828  * @attach:	[in]	attachment to be detached; is free'd after this call.
829  *
830  * Clean up a device attachment obtained by calling dma_buf_attach().
831  *
832  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
833  */
834 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
835 {
836 	if (WARN_ON(!dmabuf || !attach))
837 		return;
838 
839 	if (attach->sgt) {
840 		if (dma_buf_is_dynamic(attach->dmabuf))
841 			dma_resv_lock(attach->dmabuf->resv, NULL);
842 
843 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
844 
845 		if (dma_buf_is_dynamic(attach->dmabuf)) {
846 			dma_buf_unpin(attach);
847 			dma_resv_unlock(attach->dmabuf->resv);
848 		}
849 	}
850 
851 	dma_resv_lock(dmabuf->resv, NULL);
852 	list_del(&attach->node);
853 	dma_resv_unlock(dmabuf->resv);
854 	if (dmabuf->ops->detach)
855 		dmabuf->ops->detach(dmabuf, attach);
856 
857 	kfree(attach);
858 }
859 EXPORT_SYMBOL_GPL(dma_buf_detach);
860 
861 /**
862  * dma_buf_pin - Lock down the DMA-buf
863  * @attach:	[in]	attachment which should be pinned
864  *
865  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
866  * call this, and only for limited use cases like scanout and not for temporary
867  * pin operations. It is not permitted to allow userspace to pin arbitrary
868  * amounts of buffers through this interface.
869  *
870  * Buffers must be unpinned by calling dma_buf_unpin().
871  *
872  * Returns:
873  * 0 on success, negative error code on failure.
874  */
875 int dma_buf_pin(struct dma_buf_attachment *attach)
876 {
877 	struct dma_buf *dmabuf = attach->dmabuf;
878 	int ret = 0;
879 
880 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
881 
882 	dma_resv_assert_held(dmabuf->resv);
883 
884 	if (dmabuf->ops->pin)
885 		ret = dmabuf->ops->pin(attach);
886 
887 	return ret;
888 }
889 EXPORT_SYMBOL_GPL(dma_buf_pin);
890 
891 /**
892  * dma_buf_unpin - Unpin a DMA-buf
893  * @attach:	[in]	attachment which should be unpinned
894  *
895  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
896  * any mapping of @attach again and inform the importer through
897  * &dma_buf_attach_ops.move_notify.
898  */
899 void dma_buf_unpin(struct dma_buf_attachment *attach)
900 {
901 	struct dma_buf *dmabuf = attach->dmabuf;
902 
903 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
904 
905 	dma_resv_assert_held(dmabuf->resv);
906 
907 	if (dmabuf->ops->unpin)
908 		dmabuf->ops->unpin(attach);
909 }
910 EXPORT_SYMBOL_GPL(dma_buf_unpin);
911 
912 /**
913  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
914  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
915  * dma_buf_ops.
916  * @attach:	[in]	attachment whose scatterlist is to be returned
917  * @direction:	[in]	direction of DMA transfer
918  *
919  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
920  * on error. May return -EINTR if it is interrupted by a signal.
921  *
922  * On success, the DMA addresses and lengths in the returned scatterlist are
923  * PAGE_SIZE aligned.
924  *
925  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
926  * the underlying backing storage is pinned for as long as a mapping exists,
927  * therefore users/importers should not hold onto a mapping for undue amounts of
928  * time.
929  */
930 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
931 					enum dma_data_direction direction)
932 {
933 	struct sg_table *sg_table;
934 	int r;
935 
936 	might_sleep();
937 
938 	if (WARN_ON(!attach || !attach->dmabuf))
939 		return ERR_PTR(-EINVAL);
940 
941 	if (dma_buf_attachment_is_dynamic(attach))
942 		dma_resv_assert_held(attach->dmabuf->resv);
943 
944 	if (attach->sgt) {
945 		/*
946 		 * Two mappings with different directions for the same
947 		 * attachment are not allowed.
948 		 */
949 		if (attach->dir != direction &&
950 		    attach->dir != DMA_BIDIRECTIONAL)
951 			return ERR_PTR(-EBUSY);
952 
953 		return attach->sgt;
954 	}
955 
956 	if (dma_buf_is_dynamic(attach->dmabuf)) {
957 		dma_resv_assert_held(attach->dmabuf->resv);
958 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
959 			r = dma_buf_pin(attach);
960 			if (r)
961 				return ERR_PTR(r);
962 		}
963 	}
964 
965 	sg_table = __map_dma_buf(attach, direction);
966 	if (!sg_table)
967 		sg_table = ERR_PTR(-ENOMEM);
968 
969 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
970 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
971 		dma_buf_unpin(attach);
972 
973 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
974 		attach->sgt = sg_table;
975 		attach->dir = direction;
976 	}
977 
978 #ifdef CONFIG_DMA_API_DEBUG
979 	if (!IS_ERR(sg_table)) {
980 		struct scatterlist *sg;
981 		u64 addr;
982 		int len;
983 		int i;
984 
985 		for_each_sgtable_dma_sg(sg_table, sg, i) {
986 			addr = sg_dma_address(sg);
987 			len = sg_dma_len(sg);
988 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
989 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
990 					 __func__, addr, len);
991 			}
992 		}
993 	}
994 #endif /* CONFIG_DMA_API_DEBUG */
995 
996 	return sg_table;
997 }
998 EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
999 
1000 /**
1001  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1002  * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1003  * dma_buf_ops.
1004  * @attach:	[in]	attachment to unmap buffer from
1005  * @sg_table:	[in]	scatterlist info of the buffer to unmap
1006  * @direction:  [in]    direction of DMA transfer
1007  *
1008  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1009  */
1010 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1011 				struct sg_table *sg_table,
1012 				enum dma_data_direction direction)
1013 {
1014 	might_sleep();
1015 
1016 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1017 		return;
1018 
1019 	if (dma_buf_attachment_is_dynamic(attach))
1020 		dma_resv_assert_held(attach->dmabuf->resv);
1021 
1022 	if (attach->sgt == sg_table)
1023 		return;
1024 
1025 	if (dma_buf_is_dynamic(attach->dmabuf))
1026 		dma_resv_assert_held(attach->dmabuf->resv);
1027 
1028 	__unmap_dma_buf(attach, sg_table, direction);
1029 
1030 	if (dma_buf_is_dynamic(attach->dmabuf) &&
1031 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1032 		dma_buf_unpin(attach);
1033 }
1034 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
1035 
1036 /**
1037  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1038  *
1039  * @dmabuf:	[in]	buffer which is moving
1040  *
1041  * Informs all attachments that they need to destroy and recreate all their
1042  * mappings.
1043  */
1044 void dma_buf_move_notify(struct dma_buf *dmabuf)
1045 {
1046 	struct dma_buf_attachment *attach;
1047 
1048 	dma_resv_assert_held(dmabuf->resv);
1049 
1050 	list_for_each_entry(attach, &dmabuf->attachments, node)
1051 		if (attach->importer_ops)
1052 			attach->importer_ops->move_notify(attach);
1053 }
1054 EXPORT_SYMBOL_GPL(dma_buf_move_notify);
1055 
1056 /**
1057  * DOC: cpu access
1058  *
1059  * There are multiple reasons for supporting CPU access to a dma buffer object:
1060  *
1061  * - Fallback operations in the kernel, for example when a device is connected
1062  *   over USB and the kernel needs to shuffle the data around first before
1063  *   sending it away. Cache coherency is handled by bracketing any
1064  *   transactions with calls to dma_buf_begin_cpu_access() and
1065  *   dma_buf_end_cpu_access().
1066  *
1067  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1068  *   vmap interface is introduced. Note that on very old 32-bit architectures
1069  *   vmalloc space might be limited and result in vmap calls failing.
1070  *
1071  *   Interfaces::
1072  *
1073  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
1074  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
1075  *
1076  *   The vmap call can fail if there is no vmap support in the exporter, or if
1077  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1078  *   count for all vmap access and calls down into the exporter's vmap function
1079  *   only when no vmapping exists, and only unmaps it once. Protection against
1080  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1081  *
1082  * - For full compatibility on the importer side with existing userspace
1083  *   interfaces, which might already support mmap'ing buffers. This is needed in
1084  *   many processing pipelines (e.g. feeding a software rendered image into a
1085  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1086  *   framework already supported this, and mmap support was needed for DMA
1087  *   buffer file descriptors to replace ION buffers.
1088  *
1089  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1090  *   fd. But as for kernel CPU access, the actual access needs to be bracketed,
1091  *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1092  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1093  *   be restarted.
1094  *
1095  *   Some systems might need some sort of cache coherency management e.g. when
1096  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1097  *   To circumvent this problem there are begin/end coherency markers, that
1098  *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1099  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1100  *   sequence would be used like the following:
1101  *
1102  *     - mmap dma-buf fd
1103  *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
1104  *       to the mmap'ed area, 3. SYNC_END ioctl. This can be repeated as often as you
1105  *       want (with the new data being consumed by say the GPU or the scanout
1106  *       device)
1107  *     - munmap once you don't need the buffer any more
1108  *
1109  *    For correctness and optimal performance, it is always required to use
1110  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1111  *    mapped address. Userspace cannot rely on coherent access, even when there
1112  *    are systems where it just works without calling these ioctls.
1113  *
1114  * - And as a CPU fallback in userspace processing pipelines.
1115  *
1116  *   Similar to the motivation for kernel cpu access it is again important that
1117  *   the userspace code of a given importing subsystem can use the same
1118  *   interfaces with an imported dma-buf buffer object as with a native buffer
1119  *   object. This is especially important for drm where the userspace part of
1120  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1121  *   use a different way to mmap a buffer would be rather invasive.
1122  *
1123  *   The assumption in the current dma-buf interfaces is that redirecting the
1124  *   initial mmap is all that's needed. A survey of some of the existing
1125  *   subsystems shows that no driver seems to do any nefarious thing like
1126  *   syncing up with outstanding asynchronous processing on the device or
1127  *   allocating special resources at fault time. So hopefully this is good
1128  *   enough, since adding interfaces to intercept pagefaults and allow pte
1129  *   shootdowns would increase the complexity quite a bit.
1130  *
1131  *   Interface::
1132  *
1133  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1134  *		       unsigned long);
1135  *
1136  *   If the importing subsystem simply provides a special-purpose mmap call to
1137  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1138  *   equally achieve that for a dma-buf object.
1139  */
1140 
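/*
 * A userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC sequence described
 * above, assuming fd is a dma-buf file descriptor and size its buffer size:
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	... CPU reads and writes through ptr ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, size);
 *
 * Both ioctl calls can fail with -EAGAIN or -EINTR and must then be restarted.
 */
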
1141 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1142 				      enum dma_data_direction direction)
1143 {
1144 	bool write = (direction == DMA_BIDIRECTIONAL ||
1145 		      direction == DMA_TO_DEVICE);
1146 	struct dma_resv *resv = dmabuf->resv;
1147 	long ret;
1148 
1149 	/* Wait on any implicit rendering fences */
1150 	ret = dma_resv_wait_timeout_rcu(resv, write, true,
1151 						  MAX_SCHEDULE_TIMEOUT);
1152 	if (ret < 0)
1153 		return ret;
1154 
1155 	return 0;
1156 }
1157 
1158 /**
1159  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1160  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1161  * preparations. Coherency is only guaranteed for the specified access
1162  * direction.
1163  * @dmabuf:	[in]	buffer to prepare cpu access for.
1164  * @direction:	[in]	direction of access (read and/or write).
1165  *
1166  * After the cpu access is complete the caller should call
1167  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1168  * it guaranteed to be coherent with other DMA access.
1169  *
1170  * This function will also wait for any DMA transactions tracked through
1171  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1172  * synchronization this function will only ensure cache coherency, callers must
1173  * ensure synchronization with such DMA transactions on their own.
1174  *
1175  * Can return negative error values, returns 0 on success.
1176  */
1177 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1178 			     enum dma_data_direction direction)
1179 {
1180 	int ret = 0;
1181 
1182 	if (WARN_ON(!dmabuf))
1183 		return -EINVAL;
1184 
1185 	might_lock(&dmabuf->resv->lock.base);
1186 
1187 	if (dmabuf->ops->begin_cpu_access)
1188 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1189 
1190 	/* Ensure that all fences are waited upon - but we first allow
1191 	 * the native handler the chance to do so more efficiently if it
1192 	 * chooses. A double invocation here will be a reasonably cheap no-op.
1193 	 */
1194 	if (ret == 0)
1195 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1196 
1197 	return ret;
1198 }
1199 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1200 
1201 /**
1202  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1203  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1204  * actions. Coherency is only guaranteed for the specified access
1205  * direction.
1206  * @dmabuf:	[in]	buffer to complete cpu access for.
1207  * @direction:	[in]	direction of access (read and/or write).
1208  *
1209  * This terminates CPU access started with dma_buf_begin_cpu_access().
1210  *
1211  * Can return negative error values, returns 0 on success.
1212  */
1213 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1214 			   enum dma_data_direction direction)
1215 {
1216 	int ret = 0;
1217 
1218 	WARN_ON(!dmabuf);
1219 
1220 	might_lock(&dmabuf->resv->lock.base);
1221 
1222 	if (dmabuf->ops->end_cpu_access)
1223 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1224 
1225 	return ret;
1226 }
1227 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1228 
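/*
 * A kernel-side sketch of bracketing CPU access, for example before and
 * after reading a buffer a device has written to (error handling trimmed):
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	... CPU reads, for instance through a dma_buf_vmap() mapping ...
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */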
1229 
1230 /**
1231  * dma_buf_mmap - Set up a userspace mmap with the given vma
1232  * @dmabuf:	[in]	buffer that should back the vma
1233  * @vma:	[in]	vma for the mmap
1234  * @pgoff:	[in]	offset in pages where this mmap should start within the
1235  *			dma-buf buffer.
1236  *
1237  * This function adjusts the passed in vma so that it points at the file of the
1238  * dma_buf operation. It also adjusts the starting pgoff and does bounds
1239  * checking on the size of the vma. Then it calls the exporters mmap function to
1240  * set up the mapping.
1241  *
1242  * Can return negative error values, returns 0 on success.
1243  */
1244 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1245 		 unsigned long pgoff)
1246 {
1247 	if (WARN_ON(!dmabuf || !vma))
1248 		return -EINVAL;
1249 
1250 	/* check if buffer supports mmap */
1251 	if (!dmabuf->ops->mmap)
1252 		return -EINVAL;
1253 
1254 	/* check for offset overflow */
1255 	if (pgoff + vma_pages(vma) < pgoff)
1256 		return -EOVERFLOW;
1257 
1258 	/* check for overflowing the buffer's size */
1259 	if (pgoff + vma_pages(vma) >
1260 	    dmabuf->size >> PAGE_SHIFT)
1261 		return -EINVAL;
1262 
1263 	/* readjust the vma */
1264 	vma_set_file(vma, dmabuf->file);
1265 	vma->vm_pgoff = pgoff;
1266 
1267 	return dmabuf->ops->mmap(dmabuf, vma);
1268 }
1269 EXPORT_SYMBOL_GPL(dma_buf_mmap);
1270 
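/*
 * A sketch of an importer forwarding its own mmap file operation to the
 * dma-buf; my_obj and its dmabuf member are hypothetical importer-side names:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */
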
1271 /**
1272  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1273  * address space. Same restrictions as for vmap and friends apply.
1274  * @dmabuf:	[in]	buffer to vmap
1275  * @map:	[out]	returns the vmap pointer
1276  *
1277  * This call may fail due to lack of virtual mapping address space.
1278  * These calls are optional in drivers. The intended use for them
1279  * is for mapping objects linearly in kernel space for frequently used objects.
1280  *
1281  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1282  * dma_buf_end_cpu_access() around any cpu access performed through this
1283  * mapping.
1284  *
1285  * Returns 0 on success, or a negative errno code otherwise.
1286  */
1287 int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
1288 {
1289 	struct dma_buf_map ptr;
1290 	int ret = 0;
1291 
1292 	dma_buf_map_clear(map);
1293 
1294 	if (WARN_ON(!dmabuf))
1295 		return -EINVAL;
1296 
1297 	if (!dmabuf->ops->vmap)
1298 		return -EINVAL;
1299 
1300 	mutex_lock(&dmabuf->lock);
1301 	if (dmabuf->vmapping_counter) {
1302 		dmabuf->vmapping_counter++;
1303 		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
1304 		*map = dmabuf->vmap_ptr;
1305 		goto out_unlock;
1306 	}
1307 
1308 	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
1309 
1310 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1311 	if (WARN_ON_ONCE(ret))
1312 		goto out_unlock;
1313 
1314 	dmabuf->vmap_ptr = ptr;
1315 	dmabuf->vmapping_counter = 1;
1316 
1317 	*map = dmabuf->vmap_ptr;
1318 
1319 out_unlock:
1320 	mutex_unlock(&dmabuf->lock);
1321 	return ret;
1322 }
1323 EXPORT_SYMBOL_GPL(dma_buf_vmap);
1324 
1325 /**
1326  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1327  * @dmabuf:	[in]	buffer to vunmap
1328  * @map:	[in]	vmap pointer to vunmap
1329  */
1330 void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
1331 {
1332 	if (WARN_ON(!dmabuf))
1333 		return;
1334 
1335 	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
1336 	BUG_ON(dmabuf->vmapping_counter == 0);
1337 	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
1338 
1339 	mutex_lock(&dmabuf->lock);
1340 	if (--dmabuf->vmapping_counter == 0) {
1341 		if (dmabuf->ops->vunmap)
1342 			dmabuf->ops->vunmap(dmabuf, map);
1343 		dma_buf_map_clear(&dmabuf->vmap_ptr);
1344 	}
1345 	mutex_unlock(&dmabuf->lock);
1346 }
1347 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
1348 
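/*
 * A kernel-side vmap sketch (error handling trimmed): map the whole buffer,
 * access it through the returned pointer, then drop the mapping.
 *
 *	struct dma_buf_map map;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	... access map.vaddr (or map.vaddr_iomem if map.is_iomem is set) ...
 *
 *	dma_buf_vunmap(dmabuf, &map);
 *
 * Remember to bracket the actual CPU access with dma_buf_begin_cpu_access()
 * and dma_buf_end_cpu_access() as described above.
 */
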
1349 #ifdef CONFIG_DEBUG_FS
1350 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1351 {
1352 	int ret;
1353 	struct dma_buf *buf_obj;
1354 	struct dma_buf_attachment *attach_obj;
1355 	struct dma_resv *robj;
1356 	struct dma_resv_list *fobj;
1357 	struct dma_fence *fence;
1358 	unsigned seq;
1359 	int count = 0, attach_count, shared_count, i;
1360 	size_t size = 0;
1361 
1362 	ret = mutex_lock_interruptible(&db_list.lock);
1363 
1364 	if (ret)
1365 		return ret;
1366 
1367 	seq_puts(s, "\nDma-buf Objects:\n");
1368 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1369 		   "size", "flags", "mode", "count", "ino");
1370 
1371 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1372 
1373 		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1374 		if (ret)
1375 			goto error_unlock;
1376 
1377 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1378 				buf_obj->size,
1379 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1380 				file_count(buf_obj->file),
1381 				buf_obj->exp_name,
1382 				file_inode(buf_obj->file)->i_ino,
1383 				buf_obj->name ?: "");
1384 
1385 		robj = buf_obj->resv;
1386 		while (true) {
1387 			seq = read_seqcount_begin(&robj->seq);
1388 			rcu_read_lock();
1389 			fobj = rcu_dereference(robj->fence);
1390 			shared_count = fobj ? fobj->shared_count : 0;
1391 			fence = rcu_dereference(robj->fence_excl);
1392 			if (!read_seqcount_retry(&robj->seq, seq))
1393 				break;
1394 			rcu_read_unlock();
1395 		}
1396 
1397 		if (fence)
1398 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1399 				   fence->ops->get_driver_name(fence),
1400 				   fence->ops->get_timeline_name(fence),
1401 				   dma_fence_is_signaled(fence) ? "" : "un");
1402 		for (i = 0; i < shared_count; i++) {
1403 			fence = rcu_dereference(fobj->shared[i]);
1404 			if (!dma_fence_get_rcu(fence))
1405 				continue;
1406 			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1407 				   fence->ops->get_driver_name(fence),
1408 				   fence->ops->get_timeline_name(fence),
1409 				   dma_fence_is_signaled(fence) ? "" : "un");
1410 			dma_fence_put(fence);
1411 		}
1412 		rcu_read_unlock();
1413 
1414 		seq_puts(s, "\tAttached Devices:\n");
1415 		attach_count = 0;
1416 
1417 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1418 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1419 			attach_count++;
1420 		}
1421 		dma_resv_unlock(buf_obj->resv);
1422 
1423 		seq_printf(s, "Total %d devices attached\n\n",
1424 				attach_count);
1425 
1426 		count++;
1427 		size += buf_obj->size;
1428 	}
1429 
1430 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1431 
1432 	mutex_unlock(&db_list.lock);
1433 	return 0;
1434 
1435 error_unlock:
1436 	mutex_unlock(&db_list.lock);
1437 	return ret;
1438 }
1439 
1440 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1441 
1442 static struct dentry *dma_buf_debugfs_dir;
1443 
1444 static int dma_buf_init_debugfs(void)
1445 {
1446 	struct dentry *d;
1447 	int err = 0;
1448 
1449 	d = debugfs_create_dir("dma_buf", NULL);
1450 	if (IS_ERR(d))
1451 		return PTR_ERR(d);
1452 
1453 	dma_buf_debugfs_dir = d;
1454 
1455 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1456 				NULL, &dma_buf_debug_fops);
1457 	if (IS_ERR(d)) {
1458 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1459 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1460 		dma_buf_debugfs_dir = NULL;
1461 		err = PTR_ERR(d);
1462 	}
1463 
1464 	return err;
1465 }
1466 
1467 static void dma_buf_uninit_debugfs(void)
1468 {
1469 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1470 }
1471 #else
1472 static inline int dma_buf_init_debugfs(void)
1473 {
1474 	return 0;
1475 }
1476 static inline void dma_buf_uninit_debugfs(void)
1477 {
1478 }
1479 #endif
1480 
1481 static int __init dma_buf_init(void)
1482 {
1483 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1484 	if (IS_ERR(dma_buf_mnt))
1485 		return PTR_ERR(dma_buf_mnt);
1486 
1487 	mutex_init(&db_list.lock);
1488 	INIT_LIST_HEAD(&db_list.head);
1489 	dma_buf_init_debugfs();
1490 	return 0;
1491 }
1492 subsys_initcall(dma_buf_init);
1493 
1494 static void __exit dma_buf_deinit(void)
1495 {
1496 	dma_buf_uninit_debugfs();
1497 	kern_unmount(dma_buf_mnt);
1498 }
1499 __exitcall(dma_buf_deinit);
1500