xref: /openbmc/linux/drivers/dma-buf/dma-buf.c (revision 7328736d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Framework for buffer objects that can be shared across devices/subsystems.
4  *
5  * Copyright(C) 2011 Linaro Limited. All rights reserved.
6  * Author: Sumit Semwal <sumit.semwal@ti.com>
7  *
8  * Many thanks to the linaro-mm-sig list, and especially
9  * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10  * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11  * refining of this idea.
12  */
13 
14 #include <linux/fs.h>
15 #include <linux/slab.h>
16 #include <linux/dma-buf.h>
17 #include <linux/dma-fence.h>
18 #include <linux/anon_inodes.h>
19 #include <linux/export.h>
20 #include <linux/debugfs.h>
21 #include <linux/module.h>
22 #include <linux/seq_file.h>
23 #include <linux/poll.h>
24 #include <linux/dma-resv.h>
25 #include <linux/mm.h>
26 #include <linux/mount.h>
27 #include <linux/pseudo_fs.h>
28 
29 #include <uapi/linux/dma-buf.h>
30 #include <uapi/linux/magic.h>
31 
32 #include "dma-buf-sysfs-stats.h"
33 
34 static inline int is_dma_buf_file(struct file *);
35 
36 struct dma_buf_list {
37 	struct list_head head;
38 	struct mutex lock;
39 };
40 
41 static struct dma_buf_list db_list;
42 
43 static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
44 {
45 	struct dma_buf *dmabuf;
46 	char name[DMA_BUF_NAME_LEN];
47 	size_t ret = 0;
48 
49 	dmabuf = dentry->d_fsdata;
50 	spin_lock(&dmabuf->name_lock);
51 	if (dmabuf->name)
52 		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
53 	spin_unlock(&dmabuf->name_lock);
54 
55 	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
56 			     dentry->d_name.name, ret > 0 ? name : "");
57 }
58 
59 static void dma_buf_release(struct dentry *dentry)
60 {
61 	struct dma_buf *dmabuf;
62 
63 	dmabuf = dentry->d_fsdata;
64 	if (unlikely(!dmabuf))
65 		return;
66 
67 	BUG_ON(dmabuf->vmapping_counter);
68 
69 	/*
70 	 * If you hit this BUG() it could mean:
71 	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
72 	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
73 	 */
74 	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
75 
76 	dma_buf_stats_teardown(dmabuf);
77 	dmabuf->ops->release(dmabuf);
78 
79 	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
80 		dma_resv_fini(dmabuf->resv);
81 
82 	WARN_ON(!list_empty(&dmabuf->attachments));
83 	module_put(dmabuf->owner);
84 	kfree(dmabuf->name);
85 	kfree(dmabuf);
86 }
87 
88 static int dma_buf_file_release(struct inode *inode, struct file *file)
89 {
90 	struct dma_buf *dmabuf;
91 
92 	if (!is_dma_buf_file(file))
93 		return -EINVAL;
94 
95 	dmabuf = file->private_data;
96 
97 	mutex_lock(&db_list.lock);
98 	list_del(&dmabuf->list_node);
99 	mutex_unlock(&db_list.lock);
100 
101 	return 0;
102 }
103 
104 static const struct dentry_operations dma_buf_dentry_ops = {
105 	.d_dname = dmabuffs_dname,
106 	.d_release = dma_buf_release,
107 };
108 
109 static struct vfsmount *dma_buf_mnt;
110 
111 static int dma_buf_fs_init_context(struct fs_context *fc)
112 {
113 	struct pseudo_fs_context *ctx;
114 
115 	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
116 	if (!ctx)
117 		return -ENOMEM;
118 	ctx->dops = &dma_buf_dentry_ops;
119 	return 0;
120 }
121 
122 static struct file_system_type dma_buf_fs_type = {
123 	.name = "dmabuf",
124 	.init_fs_context = dma_buf_fs_init_context,
125 	.kill_sb = kill_anon_super,
126 };
127 
128 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
129 {
130 	struct dma_buf *dmabuf;
131 
132 	if (!is_dma_buf_file(file))
133 		return -EINVAL;
134 
135 	dmabuf = file->private_data;
136 
137 	/* check if buffer supports mmap */
138 	if (!dmabuf->ops->mmap)
139 		return -EINVAL;
140 
141 	/* check for overflowing the buffer's size */
142 	if (vma->vm_pgoff + vma_pages(vma) >
143 	    dmabuf->size >> PAGE_SHIFT)
144 		return -EINVAL;
145 
146 	return dmabuf->ops->mmap(dmabuf, vma);
147 }
148 
149 static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
150 {
151 	struct dma_buf *dmabuf;
152 	loff_t base;
153 
154 	if (!is_dma_buf_file(file))
155 		return -EBADF;
156 
157 	dmabuf = file->private_data;
158 
159 	/* only support discovering the end of the buffer,
160 	   but also allow SEEK_SET to maintain the idiomatic
161 	   SEEK_END(0), SEEK_CUR(0) pattern */
162 	if (whence == SEEK_END)
163 		base = dmabuf->size;
164 	else if (whence == SEEK_SET)
165 		base = 0;
166 	else
167 		return -EINVAL;
168 
169 	if (offset != 0)
170 		return -EINVAL;
171 
172 	return base + offset;
173 }
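
/*
 * Illustrative userspace sketch (not part of this file): the SEEK_END(0) /
 * SEEK_SET(0) support above lets userspace discover a buffer's size without
 * any driver-specific ioctl. "dmabuf_fd" is assumed to be a dma-buf fd.
 *
 *     off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *     lseek(dmabuf_fd, 0, SEEK_SET);
 */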
174 
175 /**
176  * DOC: implicit fence polling
177  *
178  * To support cross-device and cross-driver synchronization of buffer access,
179  * implicit fences (represented internally in the kernel with &struct dma_fence)
180  * can be attached to a &dma_buf. The glue for that and a few related things are
181  * provided in the &dma_resv structure.
182  *
183  * Userspace can query the state of these implicitly tracked fences using poll()
184  * and related system calls:
185  *
186  * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
187  *   most recent write or exclusive fence.
188  *
189  * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
190  *   all attached fences, shared and exclusive ones.
191  *
192  * Note that this only signals the completion of the respective fences, i.e. the
193  * DMA transfers are complete. Cache flushing and any other necessary
194  * preparations before CPU access can begin still need to happen.
195  */
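
/*
 * Illustrative userspace sketch of the polling described above (not part of
 * this file; "dmabuf_fd" is assumed to be a dma-buf file descriptor obtained
 * from an exporting driver):
 *
 *     struct pollfd pfd = {
 *             .fd = dmabuf_fd,
 *             .events = POLLIN,   // wait for the most recent write/exclusive fence
 *     };
 *
 *     if (poll(&pfd, 1, -1) < 0)  // blocks until the tracked fences have signalled
 *             err(1, "poll");
 *
 * Use POLLOUT instead to wait for all attached fences, shared and exclusive.
 */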
196 
197 static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
198 {
199 	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
200 	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
201 	unsigned long flags;
202 
203 	spin_lock_irqsave(&dcb->poll->lock, flags);
204 	wake_up_locked_poll(dcb->poll, dcb->active);
205 	dcb->active = 0;
206 	spin_unlock_irqrestore(&dcb->poll->lock, flags);
207 	dma_fence_put(fence);
208 	/* Paired with get_file in dma_buf_poll */
209 	fput(dmabuf->file);
210 }
211 
212 static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
213 				struct dma_buf_poll_cb_t *dcb)
214 {
215 	struct dma_resv_iter cursor;
216 	struct dma_fence *fence;
217 	int r;
218 
219 	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
220 				fence) {
221 		dma_fence_get(fence);
222 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
223 		if (!r)
224 			return true;
225 		dma_fence_put(fence);
226 	}
227 
228 	return false;
229 }
230 
231 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
232 {
233 	struct dma_buf *dmabuf;
234 	struct dma_resv *resv;
235 	__poll_t events;
236 
237 	dmabuf = file->private_data;
238 	if (!dmabuf || !dmabuf->resv)
239 		return EPOLLERR;
240 
241 	resv = dmabuf->resv;
242 
243 	poll_wait(file, &dmabuf->poll, poll);
244 
245 	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
246 	if (!events)
247 		return 0;
248 
249 	dma_resv_lock(resv, NULL);
250 
251 	if (events & EPOLLOUT) {
252 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
253 
254 		/* Check that callback isn't busy */
255 		spin_lock_irq(&dmabuf->poll.lock);
256 		if (dcb->active)
257 			events &= ~EPOLLOUT;
258 		else
259 			dcb->active = EPOLLOUT;
260 		spin_unlock_irq(&dmabuf->poll.lock);
261 
262 		if (events & EPOLLOUT) {
263 			/* Paired with fput in dma_buf_poll_cb */
264 			get_file(dmabuf->file);
265 
266 			if (!dma_buf_poll_add_cb(resv, true, dcb))
267 				/* No callback queued, wake up any other waiters */
268 				dma_buf_poll_cb(NULL, &dcb->cb);
269 			else
270 				events &= ~EPOLLOUT;
271 		}
272 	}
273 
274 	if (events & EPOLLIN) {
275 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
276 
277 		/* Check that callback isn't busy */
278 		spin_lock_irq(&dmabuf->poll.lock);
279 		if (dcb->active)
280 			events &= ~EPOLLIN;
281 		else
282 			dcb->active = EPOLLIN;
283 		spin_unlock_irq(&dmabuf->poll.lock);
284 
285 		if (events & EPOLLIN) {
286 			/* Paired with fput in dma_buf_poll_cb */
287 			get_file(dmabuf->file);
288 
289 			if (!dma_buf_poll_add_cb(resv, false, dcb))
290 				/* No callback queued, wake up any other waiters */
291 				dma_buf_poll_cb(NULL, &dcb->cb);
292 			else
293 				events &= ~EPOLLIN;
294 		}
295 	}
296 
297 	dma_resv_unlock(resv);
298 	return events;
299 }
300 
301 /**
302  * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
303  * The name can be changed if the same piece of memory is used for
304  * multiple purposes between different devices.
305  *
306  * @dmabuf: [in]     dmabuf buffer that will be renamed.
307  * @buf:    [in]     A piece of userspace memory that contains the name of
308  *                   the dma-buf.
309  *
310  * Returns 0 on success, or a negative error code if copying the name
311  * from userspace fails.
312  *
313  */
314 static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
315 {
316 	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
317 
318 	if (IS_ERR(name))
319 		return PTR_ERR(name);
320 
321 	spin_lock(&dmabuf->name_lock);
322 	kfree(dmabuf->name);
323 	dmabuf->name = name;
324 	spin_unlock(&dmabuf->name_lock);
325 
326 	return 0;
327 }
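
/*
 * Illustrative userspace sketch of naming a buffer for accounting (not part
 * of this file; "dmabuf_fd" is assumed to be a dma-buf fd):
 *
 *     ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, "my-buffer-pool");
 *
 * The name then shows up in the fdinfo and debugfs output generated further
 * down in this file.
 */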
328 
329 static long dma_buf_ioctl(struct file *file,
330 			  unsigned int cmd, unsigned long arg)
331 {
332 	struct dma_buf *dmabuf;
333 	struct dma_buf_sync sync;
334 	enum dma_data_direction direction;
335 	int ret;
336 
337 	dmabuf = file->private_data;
338 
339 	switch (cmd) {
340 	case DMA_BUF_IOCTL_SYNC:
341 		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
342 			return -EFAULT;
343 
344 		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
345 			return -EINVAL;
346 
347 		switch (sync.flags & DMA_BUF_SYNC_RW) {
348 		case DMA_BUF_SYNC_READ:
349 			direction = DMA_FROM_DEVICE;
350 			break;
351 		case DMA_BUF_SYNC_WRITE:
352 			direction = DMA_TO_DEVICE;
353 			break;
354 		case DMA_BUF_SYNC_RW:
355 			direction = DMA_BIDIRECTIONAL;
356 			break;
357 		default:
358 			return -EINVAL;
359 		}
360 
361 		if (sync.flags & DMA_BUF_SYNC_END)
362 			ret = dma_buf_end_cpu_access(dmabuf, direction);
363 		else
364 			ret = dma_buf_begin_cpu_access(dmabuf, direction);
365 
366 		return ret;
367 
368 	case DMA_BUF_SET_NAME_A:
369 	case DMA_BUF_SET_NAME_B:
370 		return dma_buf_set_name(dmabuf, (const char __user *)arg);
371 
372 	default:
373 		return -ENOTTY;
374 	}
375 }
376 
377 static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
378 {
379 	struct dma_buf *dmabuf = file->private_data;
380 
381 	seq_printf(m, "size:\t%zu\n", dmabuf->size);
382 	/* Don't count the temporary reference taken inside procfs seq_show */
383 	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
384 	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
385 	spin_lock(&dmabuf->name_lock);
386 	if (dmabuf->name)
387 		seq_printf(m, "name:\t%s\n", dmabuf->name);
388 	spin_unlock(&dmabuf->name_lock);
389 }
390 
391 static const struct file_operations dma_buf_fops = {
392 	.release	= dma_buf_file_release,
393 	.mmap		= dma_buf_mmap_internal,
394 	.llseek		= dma_buf_llseek,
395 	.poll		= dma_buf_poll,
396 	.unlocked_ioctl	= dma_buf_ioctl,
397 	.compat_ioctl	= compat_ptr_ioctl,
398 	.show_fdinfo	= dma_buf_show_fdinfo,
399 };
400 
401 /*
402  * is_dma_buf_file - Check if struct file* is associated with dma_buf
403  */
404 static inline int is_dma_buf_file(struct file *file)
405 {
406 	return file->f_op == &dma_buf_fops;
407 }
408 
409 static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
410 {
411 	struct file *file;
412 	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
413 
414 	if (IS_ERR(inode))
415 		return ERR_CAST(inode);
416 
417 	inode->i_size = dmabuf->size;
418 	inode_set_bytes(inode, dmabuf->size);
419 
420 	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
421 				 flags, &dma_buf_fops);
422 	if (IS_ERR(file))
423 		goto err_alloc_file;
424 	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
425 	file->private_data = dmabuf;
426 	file->f_path.dentry->d_fsdata = dmabuf;
427 
428 	return file;
429 
430 err_alloc_file:
431 	iput(inode);
432 	return file;
433 }
434 
435 /**
436  * DOC: dma buf device access
437  *
438  * For device DMA access to a shared DMA buffer the usual sequence of operations
439  * is fairly simple:
440  *
441  * 1. The exporter defines its exporter instance using
442  *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
443  *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
444  *    as a file descriptor by calling dma_buf_fd().
445  *
446  * 2. Userspace passes this file descriptor to all drivers it wants this buffer
447  *    to share with: First the file descriptor is converted to a &dma_buf using
448  *    dma_buf_get(). Then the buffer is attached to the device using
449  *    dma_buf_attach().
450  *
451  *    Up to this stage the exporter is still free to migrate or reallocate the
452  *    backing storage.
453  *
454  * 3. Once the buffer is attached to all devices userspace can initiate DMA
455  *    access to the shared buffer. In the kernel this is done by calling
456  *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
457  *
458  * 4. Once a driver is done with a shared buffer it needs to call
459  *    dma_buf_detach() (after cleaning up any mappings) and then release the
460  *    reference acquired with dma_buf_get() by calling dma_buf_put().
461  *
462  * For the detailed semantics exporters are expected to implement see
463  * &dma_buf_ops.
464  */
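
/*
 * A condensed sketch of the four steps above. Error handling is omitted and
 * "my_exporter_ops", "my_obj" and "dev" are hypothetical driver-side names
 * used purely for illustration:
 *
 *     // exporter side
 *     DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *     exp_info.ops   = &my_exporter_ops;
 *     exp_info.size  = my_obj->size;
 *     exp_info.flags = O_CLOEXEC;
 *     exp_info.priv  = my_obj;
 *     dmabuf = dma_buf_export(&exp_info);
 *     fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *     // importer side, after receiving the fd from userspace
 *     dmabuf = dma_buf_get(fd);
 *     attach = dma_buf_attach(dmabuf, dev);
 *     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *     // ... device DMA using the addresses in sgt ...
 *     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *     dma_buf_detach(dmabuf, attach);
 *     dma_buf_put(dmabuf);
 */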
465 
466 /**
467  * dma_buf_export - Creates a new dma_buf, and associates an anon file
468  * with this buffer, so it can be exported.
469  * Also connects the allocator-specific data and ops to the buffer.
470  * Additionally, provides a name string for the exporter; useful in debugging.
471  *
472  * @exp_info:	[in]	holds all the export related information provided
473  *			by the exporter. see &struct dma_buf_export_info
474  *			for further details.
475  *
476  * Returns, on success, a newly created struct dma_buf object, which wraps the
477  * supplied private data and operations for struct dma_buf_ops. On either
478  * missing ops or an error in allocating struct dma_buf, an ERR_PTR() encoded
479  * negative error is returned.
480  *
481  * For most cases the easiest way to create @exp_info is through the
482  * %DEFINE_DMA_BUF_EXPORT_INFO macro.
483  */
484 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
485 {
486 	struct dma_buf *dmabuf;
487 	struct dma_resv *resv = exp_info->resv;
488 	struct file *file;
489 	size_t alloc_size = sizeof(struct dma_buf);
490 	int ret;
491 
492 	if (!exp_info->resv)
493 		alloc_size += sizeof(struct dma_resv);
494 	else
495 		/* prevent &dma_buf[1] == dma_buf->resv */
496 		alloc_size += 1;
497 
498 	if (WARN_ON(!exp_info->priv
499 			  || !exp_info->ops
500 			  || !exp_info->ops->map_dma_buf
501 			  || !exp_info->ops->unmap_dma_buf
502 			  || !exp_info->ops->release)) {
503 		return ERR_PTR(-EINVAL);
504 	}
505 
506 	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
507 		    (exp_info->ops->pin || exp_info->ops->unpin)))
508 		return ERR_PTR(-EINVAL);
509 
510 	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
511 		return ERR_PTR(-EINVAL);
512 
513 	if (!try_module_get(exp_info->owner))
514 		return ERR_PTR(-ENOENT);
515 
516 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
517 	if (!dmabuf) {
518 		ret = -ENOMEM;
519 		goto err_module;
520 	}
521 
522 	dmabuf->priv = exp_info->priv;
523 	dmabuf->ops = exp_info->ops;
524 	dmabuf->size = exp_info->size;
525 	dmabuf->exp_name = exp_info->exp_name;
526 	dmabuf->owner = exp_info->owner;
527 	spin_lock_init(&dmabuf->name_lock);
528 	init_waitqueue_head(&dmabuf->poll);
529 	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
530 	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
531 
532 	if (!resv) {
533 		resv = (struct dma_resv *)&dmabuf[1];
534 		dma_resv_init(resv);
535 	}
536 	dmabuf->resv = resv;
537 
538 	file = dma_buf_getfile(dmabuf, exp_info->flags);
539 	if (IS_ERR(file)) {
540 		ret = PTR_ERR(file);
541 		goto err_dmabuf;
542 	}
543 
544 	file->f_mode |= FMODE_LSEEK;
545 	dmabuf->file = file;
546 
547 	ret = dma_buf_stats_setup(dmabuf);
548 	if (ret)
549 		goto err_sysfs;
550 
551 	mutex_init(&dmabuf->lock);
552 	INIT_LIST_HEAD(&dmabuf->attachments);
553 
554 	mutex_lock(&db_list.lock);
555 	list_add(&dmabuf->list_node, &db_list.head);
556 	mutex_unlock(&db_list.lock);
557 
558 	return dmabuf;
559 
560 err_sysfs:
561 	/*
562 	 * Set file->f_path.dentry->d_fsdata to NULL so that when
563 	 * dma_buf_release() gets invoked by dentry_ops, it exits
564 	 * early before calling the release() dma_buf op.
565 	 */
566 	file->f_path.dentry->d_fsdata = NULL;
567 	fput(file);
568 err_dmabuf:
569 	kfree(dmabuf);
570 err_module:
571 	module_put(exp_info->owner);
572 	return ERR_PTR(ret);
573 }
574 EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
575 
576 /**
577  * dma_buf_fd - returns a file descriptor for the given struct dma_buf
578  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
579  * @flags:      [in]    flags to give to fd
580  *
581  * On success, returns an associated 'fd'. Otherwise, returns a negative error code.
582  */
583 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
584 {
585 	int fd;
586 
587 	if (!dmabuf || !dmabuf->file)
588 		return -EINVAL;
589 
590 	fd = get_unused_fd_flags(flags);
591 	if (fd < 0)
592 		return fd;
593 
594 	fd_install(fd, dmabuf->file);
595 
596 	return fd;
597 }
598 EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
599 
600 /**
601  * dma_buf_get - returns the struct dma_buf related to an fd
602  * @fd:	[in]	fd associated with the struct dma_buf to be returned
603  *
604  * On success, returns the struct dma_buf associated with an fd; uses
605  * file's refcounting done by fget to increase refcount. Returns ERR_PTR
606  * otherwise.
607  */
608 struct dma_buf *dma_buf_get(int fd)
609 {
610 	struct file *file;
611 
612 	file = fget(fd);
613 
614 	if (!file)
615 		return ERR_PTR(-EBADF);
616 
617 	if (!is_dma_buf_file(file)) {
618 		fput(file);
619 		return ERR_PTR(-EINVAL);
620 	}
621 
622 	return file->private_data;
623 }
624 EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
625 
626 /**
627  * dma_buf_put - decreases refcount of the buffer
628  * @dmabuf:	[in]	buffer to reduce refcount of
629  *
630  * Uses file's refcounting done implicitly by fput().
631  *
632  * If, as a result of this call, the refcount becomes 0, the 'release' file
633  * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
634  * in turn, and frees the memory allocated for dmabuf when exported.
635  */
636 void dma_buf_put(struct dma_buf *dmabuf)
637 {
638 	if (WARN_ON(!dmabuf || !dmabuf->file))
639 		return;
640 
641 	fput(dmabuf->file);
642 }
643 EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
644 
645 static void mangle_sg_table(struct sg_table *sg_table)
646 {
647 #ifdef CONFIG_DMABUF_DEBUG
648 	int i;
649 	struct scatterlist *sg;
650 
651 	/* To catch abuse of the underlying struct page by importers mix
652 	 * up the bits, but take care to preserve the low SG_ bits to
653 	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
654 	 * before passing the sgt back to the exporter. */
655 	for_each_sgtable_sg(sg_table, sg, i)
656 		sg->page_link ^= ~0xffUL;
657 #endif
658 }
659 
660 static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
661 				       enum dma_data_direction direction)
662 {
663 	struct sg_table *sg_table;
664 	signed long ret;
665 
666 	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
667 	if (IS_ERR_OR_NULL(sg_table))
668 		return sg_table;
669 
670 	if (!dma_buf_attachment_is_dynamic(attach)) {
671 		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
672 					    DMA_RESV_USAGE_KERNEL, true,
673 					    MAX_SCHEDULE_TIMEOUT);
674 		if (ret < 0) {
675 			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
676 							   direction);
677 			return ERR_PTR(ret);
678 		}
679 	}
680 
681 	mangle_sg_table(sg_table);
682 	return sg_table;
683 }
684 
685 /**
686  * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
687  * @dmabuf:		[in]	buffer to attach device to.
688  * @dev:		[in]	device to be attached.
689  * @importer_ops:	[in]	importer operations for the attachment
690  * @importer_priv:	[in]	importer private pointer for the attachment
691  *
692  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
693  * must be cleaned up by calling dma_buf_detach().
694  *
695  * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
696  * functionality.
697  *
698  * Returns:
699  *
700  * A pointer to newly created &dma_buf_attachment on success, or a negative
701  * error code wrapped into a pointer on failure.
702  *
703  * Note that this can fail if the backing storage of @dmabuf is in a place not
704  * accessible to @dev, and cannot be moved to a more suitable place. This is
705  * indicated with the error code -EBUSY.
706  */
707 struct dma_buf_attachment *
708 dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
709 		       const struct dma_buf_attach_ops *importer_ops,
710 		       void *importer_priv)
711 {
712 	struct dma_buf_attachment *attach;
713 	int ret;
714 
715 	if (WARN_ON(!dmabuf || !dev))
716 		return ERR_PTR(-EINVAL);
717 
718 	if (WARN_ON(importer_ops && !importer_ops->move_notify))
719 		return ERR_PTR(-EINVAL);
720 
721 	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
722 	if (!attach)
723 		return ERR_PTR(-ENOMEM);
724 
725 	attach->dev = dev;
726 	attach->dmabuf = dmabuf;
727 	if (importer_ops)
728 		attach->peer2peer = importer_ops->allow_peer2peer;
729 	attach->importer_ops = importer_ops;
730 	attach->importer_priv = importer_priv;
731 
732 	if (dmabuf->ops->attach) {
733 		ret = dmabuf->ops->attach(dmabuf, attach);
734 		if (ret)
735 			goto err_attach;
736 	}
737 	dma_resv_lock(dmabuf->resv, NULL);
738 	list_add(&attach->node, &dmabuf->attachments);
739 	dma_resv_unlock(dmabuf->resv);
740 
741 	/* When either the importer or the exporter can't handle dynamic
742 	 * mappings we cache the mapping here to avoid issues with the
743 	 * reservation object lock.
744 	 */
745 	if (dma_buf_attachment_is_dynamic(attach) !=
746 	    dma_buf_is_dynamic(dmabuf)) {
747 		struct sg_table *sgt;
748 
749 		if (dma_buf_is_dynamic(attach->dmabuf)) {
750 			dma_resv_lock(attach->dmabuf->resv, NULL);
751 			ret = dmabuf->ops->pin(attach);
752 			if (ret)
753 				goto err_unlock;
754 		}
755 
756 		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
757 		if (!sgt)
758 			sgt = ERR_PTR(-ENOMEM);
759 		if (IS_ERR(sgt)) {
760 			ret = PTR_ERR(sgt);
761 			goto err_unpin;
762 		}
763 		if (dma_buf_is_dynamic(attach->dmabuf))
764 			dma_resv_unlock(attach->dmabuf->resv);
765 		attach->sgt = sgt;
766 		attach->dir = DMA_BIDIRECTIONAL;
767 	}
768 
769 	return attach;
770 
771 err_attach:
772 	kfree(attach);
773 	return ERR_PTR(ret);
774 
775 err_unpin:
776 	if (dma_buf_is_dynamic(attach->dmabuf))
777 		dmabuf->ops->unpin(attach);
778 
779 err_unlock:
780 	if (dma_buf_is_dynamic(attach->dmabuf))
781 		dma_resv_unlock(attach->dmabuf->resv);
782 
783 	dma_buf_detach(dmabuf, attach);
784 	return ERR_PTR(ret);
785 }
786 EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
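
/*
 * Sketch of a dynamic attachment. "my_move_notify", "my_importer_ops" and
 * "my_priv" are hypothetical importer-side names, and error handling is
 * omitted; mappings of a dynamic attachment must be created and destroyed
 * under the reservation lock:
 *
 *     static void my_move_notify(struct dma_buf_attachment *attach)
 *     {
 *             // invalidate any cached mappings; remap later under the resv lock
 *     }
 *
 *     static const struct dma_buf_attach_ops my_importer_ops = {
 *             .allow_peer2peer = true,
 *             .move_notify = my_move_notify,
 *     };
 *
 *     attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, my_priv);
 *
 *     dma_resv_lock(dmabuf->resv, NULL);
 *     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *     // ... device DMA, synchronized against the fences in dmabuf->resv ...
 *     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *     dma_resv_unlock(dmabuf->resv);
 */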
787 
788 /**
789  * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
790  * @dmabuf:	[in]	buffer to attach device to.
791  * @dev:	[in]	device to be attached.
792  *
793  * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
794  * mapping.
795  */
796 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
797 					  struct device *dev)
798 {
799 	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
800 }
801 EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
802 
803 static void __unmap_dma_buf(struct dma_buf_attachment *attach,
804 			    struct sg_table *sg_table,
805 			    enum dma_data_direction direction)
806 {
807 	/* uses XOR, hence this unmangles */
808 	mangle_sg_table(sg_table);
809 
810 	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
811 }
812 
813 /**
814  * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
815  * @dmabuf:	[in]	buffer to detach from.
816  * @attach:	[in]	attachment to be detached; is free'd after this call.
817  *
818  * Clean up a device attachment obtained by calling dma_buf_attach().
819  *
820  * Optionally this calls &dma_buf_ops.detach for device-specific detach.
821  */
822 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
823 {
824 	if (WARN_ON(!dmabuf || !attach))
825 		return;
826 
827 	if (attach->sgt) {
828 		if (dma_buf_is_dynamic(attach->dmabuf))
829 			dma_resv_lock(attach->dmabuf->resv, NULL);
830 
831 		__unmap_dma_buf(attach, attach->sgt, attach->dir);
832 
833 		if (dma_buf_is_dynamic(attach->dmabuf)) {
834 			dmabuf->ops->unpin(attach);
835 			dma_resv_unlock(attach->dmabuf->resv);
836 		}
837 	}
838 
839 	dma_resv_lock(dmabuf->resv, NULL);
840 	list_del(&attach->node);
841 	dma_resv_unlock(dmabuf->resv);
842 	if (dmabuf->ops->detach)
843 		dmabuf->ops->detach(dmabuf, attach);
844 
845 	kfree(attach);
846 }
847 EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
848 
849 /**
850  * dma_buf_pin - Lock down the DMA-buf
851  * @attach:	[in]	attachment which should be pinned
852  *
853  * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
854  * call this, and only for limited use cases like scanout and not for temporary
855  * pin operations. It is not permitted to allow userspace to pin arbitrary
856  * amounts of buffers through this interface.
857  *
858  * Buffers must be unpinned by calling dma_buf_unpin().
859  *
860  * Returns:
861  * 0 on success, negative error code on failure.
862  */
863 int dma_buf_pin(struct dma_buf_attachment *attach)
864 {
865 	struct dma_buf *dmabuf = attach->dmabuf;
866 	int ret = 0;
867 
868 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
869 
870 	dma_resv_assert_held(dmabuf->resv);
871 
872 	if (dmabuf->ops->pin)
873 		ret = dmabuf->ops->pin(attach);
874 
875 	return ret;
876 }
877 EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
878 
879 /**
880  * dma_buf_unpin - Unpin a DMA-buf
881  * @attach:	[in]	attachment which should be unpinned
882  *
883  * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
884  * any mapping of @attach again and inform the importer through
885  * &dma_buf_attach_ops.move_notify.
886  */
887 void dma_buf_unpin(struct dma_buf_attachment *attach)
888 {
889 	struct dma_buf *dmabuf = attach->dmabuf;
890 
891 	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
892 
893 	dma_resv_assert_held(dmabuf->resv);
894 
895 	if (dmabuf->ops->unpin)
896 		dmabuf->ops->unpin(attach);
897 }
898 EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
899 
900 /**
901  * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
902  * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
903  * dma_buf_ops.
904  * @attach:	[in]	attachment whose scatterlist is to be returned
905  * @direction:	[in]	direction of DMA transfer
906  *
907  * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
908  * on error. May return -EINTR if it is interrupted by a signal.
909  *
910  * On success, the DMA addresses and lengths in the returned scatterlist are
911  * PAGE_SIZE aligned.
912  *
913  * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
914  * the underlying backing storage is pinned for as long as a mapping exists,
915  * therefore users/importers should not hold onto a mapping for undue amounts of
916  * time.
917  *
918  * Important: Dynamic importers must wait for the exclusive fence of the struct
919  * dma_resv attached to the DMA-BUF first.
920  */
921 struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
922 					enum dma_data_direction direction)
923 {
924 	struct sg_table *sg_table;
925 	int r;
926 
927 	might_sleep();
928 
929 	if (WARN_ON(!attach || !attach->dmabuf))
930 		return ERR_PTR(-EINVAL);
931 
932 	if (dma_buf_attachment_is_dynamic(attach))
933 		dma_resv_assert_held(attach->dmabuf->resv);
934 
935 	if (attach->sgt) {
936 		/*
937 		 * Two mappings with different directions for the same
938 		 * attachment are not allowed.
939 		 */
940 		if (attach->dir != direction &&
941 		    attach->dir != DMA_BIDIRECTIONAL)
942 			return ERR_PTR(-EBUSY);
943 
944 		return attach->sgt;
945 	}
946 
947 	if (dma_buf_is_dynamic(attach->dmabuf)) {
948 		dma_resv_assert_held(attach->dmabuf->resv);
949 		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
950 			r = attach->dmabuf->ops->pin(attach);
951 			if (r)
952 				return ERR_PTR(r);
953 		}
954 	}
955 
956 	sg_table = __map_dma_buf(attach, direction);
957 	if (!sg_table)
958 		sg_table = ERR_PTR(-ENOMEM);
959 
960 	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
961 	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
962 		attach->dmabuf->ops->unpin(attach);
963 
964 	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
965 		attach->sgt = sg_table;
966 		attach->dir = direction;
967 	}
968 
969 #ifdef CONFIG_DMA_API_DEBUG
970 	if (!IS_ERR(sg_table)) {
971 		struct scatterlist *sg;
972 		u64 addr;
973 		int len;
974 		int i;
975 
976 		for_each_sgtable_dma_sg(sg_table, sg, i) {
977 			addr = sg_dma_address(sg);
978 			len = sg_dma_len(sg);
979 			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
980 				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
981 					 __func__, addr, len);
982 			}
983 		}
984 	}
985 #endif /* CONFIG_DMA_API_DEBUG */
986 	return sg_table;
987 }
988 EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
989 
990 /**
991  * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
992  * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
993  * dma_buf_ops.
994  * @attach:	[in]	attachment to unmap buffer from
995  * @sg_table:	[in]	scatterlist info of the buffer to unmap
996  * @direction:  [in]    direction of DMA transfer
997  *
998  * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
999  */
1000 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1001 				struct sg_table *sg_table,
1002 				enum dma_data_direction direction)
1003 {
1004 	might_sleep();
1005 
1006 	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1007 		return;
1008 
1009 	if (dma_buf_attachment_is_dynamic(attach))
1010 		dma_resv_assert_held(attach->dmabuf->resv);
1011 
1012 	if (attach->sgt == sg_table)
1013 		return;
1014 
1015 	if (dma_buf_is_dynamic(attach->dmabuf))
1016 		dma_resv_assert_held(attach->dmabuf->resv);
1017 
1018 	__unmap_dma_buf(attach, sg_table, direction);
1019 
1020 	if (dma_buf_is_dynamic(attach->dmabuf) &&
1021 	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1022 		dma_buf_unpin(attach);
1023 }
1024 EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1025 
1026 /**
1027  * dma_buf_move_notify - notify attachments that DMA-buf is moving
1028  *
1029  * @dmabuf:	[in]	buffer which is moving
1030  *
1031  * Informs all attachments that they need to destroy and recreate all their
1032  * mappings.
1033  */
1034 void dma_buf_move_notify(struct dma_buf *dmabuf)
1035 {
1036 	struct dma_buf_attachment *attach;
1037 
1038 	dma_resv_assert_held(dmabuf->resv);
1039 
1040 	list_for_each_entry(attach, &dmabuf->attachments, node)
1041 		if (attach->importer_ops)
1042 			attach->importer_ops->move_notify(attach);
1043 }
1044 EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1045 
1046 /**
1047  * DOC: cpu access
1048  *
1049  * There are multiple reasons for supporting CPU access to a dma buffer object:
1050  *
1051  * - Fallback operations in the kernel, for example when a device is connected
1052  *   over USB and the kernel needs to shuffle the data around first before
1053  *   sending it away. Cache coherency is handled by bracketing any
1054  *   transactions with calls to dma_buf_begin_cpu_access() and
1055  *   dma_buf_end_cpu_access().
1056  *
1057  *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1058  *   vmap interface is introduced. Note that on very old 32-bit architectures
1059  *   vmalloc space might be limited and result in vmap calls failing.
1060  *
1061  *   Interfaces::
1062  *
1063  *      void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1064  *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1065  *
1066  *   The vmap call can fail if there is no vmap support in the exporter, or if
1067  *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1068  *   count for all vmap access and calls down into the exporter's vmap function
1069  *   only when no vmapping exists, and only unmaps it once. Protection against
1070  *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1071  *
1072  * - For full compatibility on the importer side with existing userspace
1073  *   interfaces, which might already support mmap'ing buffers. This is needed in
1074  *   many processing pipelines (e.g. feeding a software rendered image into a
1075  *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1076  *   framework already supported this, and mmap support was needed for DMA buffer
1077  *   file descriptors to replace ION buffers.
1078  *
1079  *   There are no special interfaces; userspace simply calls mmap on the dma-buf
1080  *   fd. But as for CPU access there's a need to bracket the actual access,
1081  *   which is handled by the DMA_BUF_IOCTL_SYNC ioctl. Note that
1082  *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1083  *   be restarted.
1084  *
1085  *   Some systems might need some sort of cache coherency management e.g. when
1086  *   CPU and GPU domains are being accessed through dma-buf at the same time.
1087  *   To circumvent this problem there are begin/end coherency markers, that
1088  *   forward directly to existing dma-buf device drivers' vfunc hooks. Userspace
1089  *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1090  *   sequence would be used like the following:
1091  *
1092  *     - mmap dma-buf fd
1093  *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1094  *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1095  *       want (with the new data being consumed by say the GPU or the scanout
1096  *       device)
1097  *     - munmap once you don't need the buffer any more
1098  *
1099  *    For correctness and optimal performance, it is always required to use
1100  *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1101  *    mapped address. Userspace cannot rely on coherent access, even when there
1102  *    are systems where it just works without calling these ioctls.
1103  *
1104  * - And as a CPU fallback in userspace processing pipelines.
1105  *
1106  *   Similar to the motivation for kernel cpu access it is again important that
1107  *   the userspace code of a given importing subsystem can use the same
1108  *   interfaces with an imported dma-buf buffer object as with a native buffer
1109  *   object. This is especially important for drm where the userspace part of
1110  *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
1111  *   use a different way to mmap a buffer is rather invasive.
1112  *
1113  *   The assumption in the current dma-buf interfaces is that redirecting the
1114  *   initial mmap is all that's needed. A survey of some of the existing
1115  *   subsystems shows that no driver seems to do any nefarious thing like
1116  *   syncing up with outstanding asynchronous processing on the device or
1117  *   allocating special resources at fault time. So hopefully this is good
1118  *   enough, since adding interfaces to intercept pagefaults and allow pte
1119  *   shootdowns would increase the complexity quite a bit.
1120  *
1121  *   Interface::
1122  *
1123  *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1124  *		       unsigned long);
1125  *
1126  *   If the importing subsystem simply provides a special-purpose mmap call to
1127  *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1128  *   equally achieve that for a dma-buf object.
1129  */
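
/*
 * Illustrative userspace sketch of the mmap plus DMA_BUF_IOCTL_SYNC
 * bracketing described above (not part of this file; "dmabuf_fd" and "size"
 * are assumed to come from the exporting driver's uAPI):
 *
 *     struct dma_buf_sync sync = { 0 };
 *     void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      dmabuf_fd, 0);
 *
 *     sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *     ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);   // restart on -EAGAIN/-EINTR
 *
 *     // ... CPU reads/writes through ptr ...
 *
 *     sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *     ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *     munmap(ptr, size);
 */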
1130 
1131 static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1132 				      enum dma_data_direction direction)
1133 {
1134 	bool write = (direction == DMA_BIDIRECTIONAL ||
1135 		      direction == DMA_TO_DEVICE);
1136 	struct dma_resv *resv = dmabuf->resv;
1137 	long ret;
1138 
1139 	/* Wait on any implicit rendering fences */
1140 	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1141 				    true, MAX_SCHEDULE_TIMEOUT);
1142 	if (ret < 0)
1143 		return ret;
1144 
1145 	return 0;
1146 }
1147 
1148 /**
1149  * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1150  * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1151  * preparations. Coherency is only guaranteed for the specified access
1152  * direction.
1153  * @dmabuf:	[in]	buffer to prepare cpu access for.
1154  * @direction:	[in]	direction of the CPU access.
1155  *
1156  * After the cpu access is complete the caller should call
1157  * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1158  * it guaranteed to be coherent with other DMA access.
1159  *
1160  * This function will also wait for any DMA transactions tracked through
1161  * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1162  * synchronization this function will only ensure cache coherency, callers must
1163  * ensure synchronization with such DMA transactions on their own.
1164  *
1165  * Can return negative error values, returns 0 on success.
1166  */
1167 int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1168 			     enum dma_data_direction direction)
1169 {
1170 	int ret = 0;
1171 
1172 	if (WARN_ON(!dmabuf))
1173 		return -EINVAL;
1174 
1175 	might_lock(&dmabuf->resv->lock.base);
1176 
1177 	if (dmabuf->ops->begin_cpu_access)
1178 		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1179 
1180 	/* Ensure that all fences are waited upon - but we first allow
1181 	 * the native handler the chance to do so more efficiently if it
1182 	 * chooses. A double invocation here will be a reasonably cheap no-op.
1183 	 */
1184 	if (ret == 0)
1185 		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1186 
1187 	return ret;
1188 }
1189 EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1190 
1191 /**
1192  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1193  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1194  * actions. Coherency is only guaranteed for the specified access
1195  * direction.
1196  * @dmabuf:	[in]	buffer to complete cpu access for.
1197  * @direction:	[in]	direction of the CPU access.
1198  *
1199  * This terminates CPU access started with dma_buf_begin_cpu_access().
1200  *
1201  * Can return negative error values, returns 0 on success.
1202  */
1203 int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1204 			   enum dma_data_direction direction)
1205 {
1206 	int ret = 0;
1207 
1208 	WARN_ON(!dmabuf);
1209 
1210 	might_lock(&dmabuf->resv->lock.base);
1211 
1212 	if (dmabuf->ops->end_cpu_access)
1213 		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1214 
1215 	return ret;
1216 }
1217 EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
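
/*
 * Kernel-side sketch of bracketing CPU access with the two helpers above
 * (error handling trimmed; how the CPU actually reaches the data, e.g. via
 * dma_buf_vmap(), is up to the caller):
 *
 *     ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *     if (ret)
 *             return ret;
 *
 *     // ... CPU reads of the buffer contents ...
 *
 *     dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */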
1218 
1219 
1220 /**
1221  * dma_buf_mmap - Set up a userspace mmap with the given vma
1222  * @dmabuf:	[in]	buffer that should back the vma
1223  * @vma:	[in]	vma for the mmap
1224  * @pgoff:	[in]	offset in pages where this mmap should start within the
1225  *			dma-buf buffer.
1226  *
1227  * This function adjusts the passed-in vma so that it points at the file of the
1228  * dma_buf. It also adjusts the starting pgoff and does bounds checking on the
1229  * size of the vma. Then it calls the exporter's mmap function to set up the
1230  * mapping.
1231  *
1232  * Can return negative error values, returns 0 on success.
1233  */
1234 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1235 		 unsigned long pgoff)
1236 {
1237 	if (WARN_ON(!dmabuf || !vma))
1238 		return -EINVAL;
1239 
1240 	/* check if buffer supports mmap */
1241 	if (!dmabuf->ops->mmap)
1242 		return -EINVAL;
1243 
1244 	/* check for offset overflow */
1245 	if (pgoff + vma_pages(vma) < pgoff)
1246 		return -EOVERFLOW;
1247 
1248 	/* check for overflowing the buffer's size */
1249 	if (pgoff + vma_pages(vma) >
1250 	    dmabuf->size >> PAGE_SHIFT)
1251 		return -EINVAL;
1252 
1253 	/* readjust the vma */
1254 	vma_set_file(vma, dmabuf->file);
1255 	vma->vm_pgoff = pgoff;
1256 
1257 	return dmabuf->ops->mmap(dmabuf, vma);
1258 }
1259 EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
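
/*
 * Sketch of an importer redirecting its own mmap path to the dma-buf,
 * assuming a hypothetical "my_obj" that caches the imported dma_buf pointer:
 *
 *     static int my_obj_mmap(struct my_obj *obj, struct vm_area_struct *vma)
 *     {
 *             return dma_buf_mmap(obj->dmabuf, vma, 0);
 *     }
 */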
1260 
1261 /**
1262  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1263  * address space. Same restrictions as for vmap and friends apply.
1264  * @dmabuf:	[in]	buffer to vmap
1265  * @map:	[out]	returns the vmap pointer
1266  *
1267  * This call may fail due to lack of virtual mapping address space.
1268  * These calls are optional in drivers. The intended use for them
1269  * is mapping objects linearly into kernel space for frequently used objects.
1270  *
1271  * To ensure coherency users must call dma_buf_begin_cpu_access() and
1272  * dma_buf_end_cpu_access() around any cpu access performed through this
1273  * mapping.
1274  *
1275  * Returns 0 on success, or a negative errno code otherwise.
1276  */
1277 int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1278 {
1279 	struct iosys_map ptr;
1280 	int ret = 0;
1281 
1282 	iosys_map_clear(map);
1283 
1284 	if (WARN_ON(!dmabuf))
1285 		return -EINVAL;
1286 
1287 	if (!dmabuf->ops->vmap)
1288 		return -EINVAL;
1289 
1290 	mutex_lock(&dmabuf->lock);
1291 	if (dmabuf->vmapping_counter) {
1292 		dmabuf->vmapping_counter++;
1293 		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1294 		*map = dmabuf->vmap_ptr;
1295 		goto out_unlock;
1296 	}
1297 
1298 	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1299 
1300 	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1301 	if (WARN_ON_ONCE(ret))
1302 		goto out_unlock;
1303 
1304 	dmabuf->vmap_ptr = ptr;
1305 	dmabuf->vmapping_counter = 1;
1306 
1307 	*map = dmabuf->vmap_ptr;
1308 
1309 out_unlock:
1310 	mutex_unlock(&dmabuf->lock);
1311 	return ret;
1312 }
1313 EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1314 
1315 /**
1316  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1317  * @dmabuf:	[in]	buffer to vunmap
1318  * @map:	[in]	vmap pointer to vunmap
1319  */
1320 void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1321 {
1322 	if (WARN_ON(!dmabuf))
1323 		return;
1324 
1325 	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1326 	BUG_ON(dmabuf->vmapping_counter == 0);
1327 	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1328 
1329 	mutex_lock(&dmabuf->lock);
1330 	if (--dmabuf->vmapping_counter == 0) {
1331 		if (dmabuf->ops->vunmap)
1332 			dmabuf->ops->vunmap(dmabuf, map);
1333 		iosys_map_clear(&dmabuf->vmap_ptr);
1334 	}
1335 	mutex_unlock(&dmabuf->lock);
1336 }
1337 EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
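
/*
 * Sketch of a kernel vmap access using the iosys_map helpers, assuming the
 * exporter implements &dma_buf_ops.vmap (error handling trimmed; "data" and
 * "len" are hypothetical):
 *
 *     struct iosys_map map;
 *
 *     ret = dma_buf_vmap(dmabuf, &map);
 *     if (ret)
 *             return ret;
 *
 *     dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *     iosys_map_memcpy_to(&map, 0, data, len);   // write into the buffer
 *     dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *
 *     dma_buf_vunmap(dmabuf, &map);
 */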
1338 
1339 #ifdef CONFIG_DEBUG_FS
1340 static int dma_buf_debug_show(struct seq_file *s, void *unused)
1341 {
1342 	struct dma_buf *buf_obj;
1343 	struct dma_buf_attachment *attach_obj;
1344 	int count = 0, attach_count;
1345 	size_t size = 0;
1346 	int ret;
1347 
1348 	ret = mutex_lock_interruptible(&db_list.lock);
1349 
1350 	if (ret)
1351 		return ret;
1352 
1353 	seq_puts(s, "\nDma-buf Objects:\n");
1354 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1355 		   "size", "flags", "mode", "count", "ino");
1356 
1357 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1358 
1359 		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1360 		if (ret)
1361 			goto error_unlock;
1362 
1363 
1364 		spin_lock(&buf_obj->name_lock);
1365 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1366 				buf_obj->size,
1367 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1368 				file_count(buf_obj->file),
1369 				buf_obj->exp_name,
1370 				file_inode(buf_obj->file)->i_ino,
1371 				buf_obj->name ?: "<none>");
1372 		spin_unlock(&buf_obj->name_lock);
1373 
1374 		dma_resv_describe(buf_obj->resv, s);
1375 
1376 		seq_puts(s, "\tAttached Devices:\n");
1377 		attach_count = 0;
1378 
1379 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1380 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1381 			attach_count++;
1382 		}
1383 		dma_resv_unlock(buf_obj->resv);
1384 
1385 		seq_printf(s, "Total %d devices attached\n\n",
1386 				attach_count);
1387 
1388 		count++;
1389 		size += buf_obj->size;
1390 	}
1391 
1392 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1393 
1394 	mutex_unlock(&db_list.lock);
1395 	return 0;
1396 
1397 error_unlock:
1398 	mutex_unlock(&db_list.lock);
1399 	return ret;
1400 }
1401 
1402 DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1403 
1404 static struct dentry *dma_buf_debugfs_dir;
1405 
1406 static int dma_buf_init_debugfs(void)
1407 {
1408 	struct dentry *d;
1409 	int err = 0;
1410 
1411 	d = debugfs_create_dir("dma_buf", NULL);
1412 	if (IS_ERR(d))
1413 		return PTR_ERR(d);
1414 
1415 	dma_buf_debugfs_dir = d;
1416 
1417 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1418 				NULL, &dma_buf_debug_fops);
1419 	if (IS_ERR(d)) {
1420 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1421 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1422 		dma_buf_debugfs_dir = NULL;
1423 		err = PTR_ERR(d);
1424 	}
1425 
1426 	return err;
1427 }
1428 
1429 static void dma_buf_uninit_debugfs(void)
1430 {
1431 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1432 }
1433 #else
1434 static inline int dma_buf_init_debugfs(void)
1435 {
1436 	return 0;
1437 }
1438 static inline void dma_buf_uninit_debugfs(void)
1439 {
1440 }
1441 #endif
1442 
1443 static int __init dma_buf_init(void)
1444 {
1445 	int ret;
1446 
1447 	ret = dma_buf_init_sysfs_statistics();
1448 	if (ret)
1449 		return ret;
1450 
1451 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1452 	if (IS_ERR(dma_buf_mnt))
1453 		return PTR_ERR(dma_buf_mnt);
1454 
1455 	mutex_init(&db_list.lock);
1456 	INIT_LIST_HEAD(&db_list.head);
1457 	dma_buf_init_debugfs();
1458 	return 0;
1459 }
1460 subsys_initcall(dma_buf_init);
1461 
1462 static void __exit dma_buf_deinit(void)
1463 {
1464 	dma_buf_uninit_debugfs();
1465 	kern_unmount(dma_buf_mnt);
1466 	dma_buf_uninit_sysfs_statistics();
1467 }
1468 __exitcall(dma_buf_deinit);
1469