xref: /openbmc/linux/drivers/dma-buf/dma-buf.c (revision 7f904d7e1f3ec7c2de47c024a5a5c30988b54703)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Framework for buffer objects that can be shared across devices/subsystems.
4   *
5   * Copyright(C) 2011 Linaro Limited. All rights reserved.
6   * Author: Sumit Semwal <sumit.semwal@ti.com>
7   *
8   * Many thanks to the linaro-mm-sig list, and especially
9   * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10   * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11   * refining of this idea.
12   */
13  
14  #include <linux/fs.h>
15  #include <linux/slab.h>
16  #include <linux/dma-buf.h>
17  #include <linux/dma-fence.h>
18  #include <linux/anon_inodes.h>
19  #include <linux/export.h>
20  #include <linux/debugfs.h>
21  #include <linux/module.h>
22  #include <linux/seq_file.h>
23  #include <linux/poll.h>
24  #include <linux/reservation.h>
25  #include <linux/mm.h>
26  
27  #include <uapi/linux/dma-buf.h>
28  
29  static inline int is_dma_buf_file(struct file *);
30  
31  struct dma_buf_list {
32  	struct list_head head;
33  	struct mutex lock;
34  };
35  
36  static struct dma_buf_list db_list;
37  
38  static int dma_buf_release(struct inode *inode, struct file *file)
39  {
40  	struct dma_buf *dmabuf;
41  
42  	if (!is_dma_buf_file(file))
43  		return -EINVAL;
44  
45  	dmabuf = file->private_data;
46  
47  	BUG_ON(dmabuf->vmapping_counter);
48  
49  	/*
50  	 * Any fences that a dma-buf poll can wait on should be signaled
51  	 * before releasing dma-buf. This is the responsibility of each
52  	 * driver that uses the reservation objects.
53  	 *
54  	 * If you hit this BUG() it means someone dropped their ref to the
55  	 * dma-buf while still having pending operations on the buffer.
56  	 */
57  	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
58  
59  	dmabuf->ops->release(dmabuf);
60  
61  	mutex_lock(&db_list.lock);
62  	list_del(&dmabuf->list_node);
63  	mutex_unlock(&db_list.lock);
64  
65  	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
66  		reservation_object_fini(dmabuf->resv);
67  
68  	module_put(dmabuf->owner);
69  	kfree(dmabuf);
70  	return 0;
71  }
72  
73  static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
74  {
75  	struct dma_buf *dmabuf;
76  
77  	if (!is_dma_buf_file(file))
78  		return -EINVAL;
79  
80  	dmabuf = file->private_data;
81  
82  	/* check for overflowing the buffer's size */
83  	if (vma->vm_pgoff + vma_pages(vma) >
84  	    dmabuf->size >> PAGE_SHIFT)
85  		return -EINVAL;
86  
87  	return dmabuf->ops->mmap(dmabuf, vma);
88  }
89  
90  static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
91  {
92  	struct dma_buf *dmabuf;
93  	loff_t base;
94  
95  	if (!is_dma_buf_file(file))
96  		return -EBADF;
97  
98  	dmabuf = file->private_data;
99  
100  	/* Only support discovering the end of the buffer, but also allow
101  	 * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0) pattern.
102  	 */
103  	if (whence == SEEK_END)
104  		base = dmabuf->size;
105  	else if (whence == SEEK_SET)
106  		base = 0;
107  	else
108  		return -EINVAL;
109  
110  	if (offset != 0)
111  		return -EINVAL;
112  
113  	return base + offset;
114  }
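/*
 * Example (illustrative only, not part of this file): userspace can discover
 * the size of a dma-buf by seeking to its end, which is exactly the
 * SEEK_END/SEEK_SET behaviour implemented above. dmabuf_fd is assumed to be
 * a valid dma-buf file descriptor obtained from an exporting driver::
 *
 *     #include <unistd.h>
 *
 *     off_t dmabuf_size(int dmabuf_fd)
 *     {
 *             off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *
 *             if (size >= 0)
 *                     lseek(dmabuf_fd, 0, SEEK_SET);
 *             return size;
 *     }
 */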
115  
116  /**
117   * DOC: fence polling
118   *
119   * To support cross-device and cross-driver synchronization of buffer access
120   * implicit fences (represented internally in the kernel with &struct dma_fence) can
121   * be attached to a &dma_buf. The glue for that and a few related things are
122   * provided in the &reservation_object structure.
123   *
124   * Userspace can query the state of these implicitly tracked fences using poll()
125   * and related system calls:
126   *
127   * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
128   *   most recent write or exclusive fence.
129   *
130   * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
131   *   all attached fences, shared and exclusive ones.
132   *
133   * Note that this only signals the completion of the respective fences, i.e. the
134   * DMA transfers are complete. Cache flushing and any other necessary
135   * preparations before CPU access can begin still need to happen.
136   */
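/*
 * A userspace sketch of the polling semantics documented above (assumption:
 * dmabuf_fd is a valid dma-buf file descriptor). Waiting for EPOLLOUT waits
 * for all attached fences, i.e. until the buffer is idle for both read and
 * write access; poll() returns > 0 once the fences have signalled, 0 on
 * timeout and -1 on error::
 *
 *     #include <poll.h>
 *
 *     int dmabuf_wait_idle(int dmabuf_fd, int timeout_ms)
 *     {
 *             struct pollfd pfd = {
 *                     .fd = dmabuf_fd,
 *                     .events = POLLOUT,
 *             };
 *
 *             return poll(&pfd, 1, timeout_ms);
 *     }
 */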
137  
138  static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
139  {
140  	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
141  	unsigned long flags;
142  
143  	spin_lock_irqsave(&dcb->poll->lock, flags);
144  	wake_up_locked_poll(dcb->poll, dcb->active);
145  	dcb->active = 0;
146  	spin_unlock_irqrestore(&dcb->poll->lock, flags);
147  }
148  
149  static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
150  {
151  	struct dma_buf *dmabuf;
152  	struct reservation_object *resv;
153  	struct reservation_object_list *fobj;
154  	struct dma_fence *fence_excl;
155  	__poll_t events;
156  	unsigned shared_count, seq;
157  
158  	dmabuf = file->private_data;
159  	if (!dmabuf || !dmabuf->resv)
160  		return EPOLLERR;
161  
162  	resv = dmabuf->resv;
163  
164  	poll_wait(file, &dmabuf->poll, poll);
165  
166  	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
167  	if (!events)
168  		return 0;
169  
170  retry:
171  	seq = read_seqcount_begin(&resv->seq);
172  	rcu_read_lock();
173  
174  	fobj = rcu_dereference(resv->fence);
175  	if (fobj)
176  		shared_count = fobj->shared_count;
177  	else
178  		shared_count = 0;
179  	fence_excl = rcu_dereference(resv->fence_excl);
180  	if (read_seqcount_retry(&resv->seq, seq)) {
181  		rcu_read_unlock();
182  		goto retry;
183  	}
184  
185  	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
186  		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
187  		__poll_t pevents = EPOLLIN;
188  
189  		if (shared_count == 0)
190  			pevents |= EPOLLOUT;
191  
192  		spin_lock_irq(&dmabuf->poll.lock);
193  		if (dcb->active) {
194  			dcb->active |= pevents;
195  			events &= ~pevents;
196  		} else
197  			dcb->active = pevents;
198  		spin_unlock_irq(&dmabuf->poll.lock);
199  
200  		if (events & pevents) {
201  			if (!dma_fence_get_rcu(fence_excl)) {
202  				/* force a recheck */
203  				events &= ~pevents;
204  				dma_buf_poll_cb(NULL, &dcb->cb);
205  			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
206  							   dma_buf_poll_cb)) {
207  				events &= ~pevents;
208  				dma_fence_put(fence_excl);
209  			} else {
210  				/*
211  				 * No callback queued, wake up any additional
212  				 * waiters.
213  				 */
214  				dma_fence_put(fence_excl);
215  				dma_buf_poll_cb(NULL, &dcb->cb);
216  			}
217  		}
218  	}
219  
220  	if ((events & EPOLLOUT) && shared_count > 0) {
221  		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
222  		int i;
223  
224  		/* Only queue a new callback if no event has fired yet */
225  		spin_lock_irq(&dmabuf->poll.lock);
226  		if (dcb->active)
227  			events &= ~EPOLLOUT;
228  		else
229  			dcb->active = EPOLLOUT;
230  		spin_unlock_irq(&dmabuf->poll.lock);
231  
232  		if (!(events & EPOLLOUT))
233  			goto out;
234  
235  		for (i = 0; i < shared_count; ++i) {
236  			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
237  
238  			if (!dma_fence_get_rcu(fence)) {
239  				/*
240  				 * fence refcount dropped to zero, this means
241  				 * that fobj has been freed
242  				 *
243  				 * call dma_buf_poll_cb and force a recheck!
244  				 */
245  				events &= ~EPOLLOUT;
246  				dma_buf_poll_cb(NULL, &dcb->cb);
247  				break;
248  			}
249  			if (!dma_fence_add_callback(fence, &dcb->cb,
250  						    dma_buf_poll_cb)) {
251  				dma_fence_put(fence);
252  				events &= ~EPOLLOUT;
253  				break;
254  			}
255  			dma_fence_put(fence);
256  		}
257  
258  		/* No callback queued, wake up any additional waiters. */
259  		if (i == shared_count)
260  			dma_buf_poll_cb(NULL, &dcb->cb);
261  	}
262  
263  out:
264  	rcu_read_unlock();
265  	return events;
266  }
267  
268  static long dma_buf_ioctl(struct file *file,
269  			  unsigned int cmd, unsigned long arg)
270  {
271  	struct dma_buf *dmabuf;
272  	struct dma_buf_sync sync;
273  	enum dma_data_direction direction;
274  	int ret;
275  
276  	dmabuf = file->private_data;
277  
278  	switch (cmd) {
279  	case DMA_BUF_IOCTL_SYNC:
280  		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
281  			return -EFAULT;
282  
283  		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
284  			return -EINVAL;
285  
286  		switch (sync.flags & DMA_BUF_SYNC_RW) {
287  		case DMA_BUF_SYNC_READ:
288  			direction = DMA_FROM_DEVICE;
289  			break;
290  		case DMA_BUF_SYNC_WRITE:
291  			direction = DMA_TO_DEVICE;
292  			break;
293  		case DMA_BUF_SYNC_RW:
294  			direction = DMA_BIDIRECTIONAL;
295  			break;
296  		default:
297  			return -EINVAL;
298  		}
299  
300  		if (sync.flags & DMA_BUF_SYNC_END)
301  			ret = dma_buf_end_cpu_access(dmabuf, direction);
302  		else
303  			ret = dma_buf_begin_cpu_access(dmabuf, direction);
304  
305  		return ret;
306  	default:
307  		return -ENOTTY;
308  	}
309  }
310  
311  static const struct file_operations dma_buf_fops = {
312  	.release	= dma_buf_release,
313  	.mmap		= dma_buf_mmap_internal,
314  	.llseek		= dma_buf_llseek,
315  	.poll		= dma_buf_poll,
316  	.unlocked_ioctl	= dma_buf_ioctl,
317  #ifdef CONFIG_COMPAT
318  	.compat_ioctl	= dma_buf_ioctl,
319  #endif
320  };
321  
322  /*
323   * is_dma_buf_file - Check if struct file* is associated with dma_buf
324   */
325  static inline int is_dma_buf_file(struct file *file)
326  {
327  	return file->f_op == &dma_buf_fops;
328  }
329  
330  /**
331   * DOC: dma buf device access
332   *
333   * For device DMA access to a shared DMA buffer the usual sequence of operations
334   * is fairly simple:
335   *
336   * 1. The exporter defines its exporter instance using
337   *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
338   *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
339   *    as a file descriptor by calling dma_buf_fd().
340   *
341   * 2. Userspace passes this file descriptor to all drivers it wants this buffer
342   *    to share with: first the file descriptor is converted to a &dma_buf using
343   *    dma_buf_get(). Then the buffer is attached to the device using
344   *    dma_buf_attach().
345   *
346   *    Up to this stage the exporter is still free to migrate or reallocate the
347   *    backing storage.
348   *
349   * 3. Once the buffer is attached to all devices userspace can initiate DMA
350   *    access to the shared buffer. In the kernel this is done by calling
351   *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
352   *
353   * 4. Once a driver is done with a shared buffer it needs to call
354   *    dma_buf_detach() (after cleaning up any mappings) and then release the
355   *    reference acquired with dma_buf_get by calling dma_buf_put().
356   *
357   * For the detailed semantics exporters are expected to implement see
358   * &dma_buf_ops.
359   */
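/*
 * A minimal importer-side sketch of steps 2 and 3 above, using only functions
 * exported by this file. Error handling is abbreviated and the surrounding
 * driver context (dev, fd) is assumed to exist::
 *
 *     struct sg_table *my_import(struct device *dev, int fd,
 *                                struct dma_buf **dmabuf_out,
 *                                struct dma_buf_attachment **attach_out)
 *     {
 *             struct dma_buf *dmabuf = dma_buf_get(fd);
 *             struct dma_buf_attachment *attach;
 *             struct sg_table *sgt;
 *
 *             if (IS_ERR(dmabuf))
 *                     return ERR_CAST(dmabuf);
 *
 *             attach = dma_buf_attach(dmabuf, dev);
 *             if (IS_ERR(attach)) {
 *                     dma_buf_put(dmabuf);
 *                     return ERR_CAST(attach);
 *             }
 *
 *             sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *             if (IS_ERR(sgt)) {
 *                     dma_buf_detach(dmabuf, attach);
 *                     dma_buf_put(dmabuf);
 *                     return sgt;
 *             }
 *
 *             *dmabuf_out = dmabuf;
 *             *attach_out = attach;
 *             return sgt;
 *     }
 */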
360  
361  /**
362   * dma_buf_export - Creates a new dma_buf, and associates an anon file
363   * with this buffer, so it can be exported.
364   * It also connects the allocator-specific data and ops to the buffer.
365   * Additionally, it records a name string for the exporter; useful in debugging.
366   *
367   * @exp_info:	[in]	holds all the export related information provided
368   *			by the exporter. see &struct dma_buf_export_info
369   *			for further details.
370   *
371   * On success, returns a newly created struct dma_buf, which wraps the supplied
372   * private data and &dma_buf_ops. If the required ops are missing, or if
373   * allocating the struct dma_buf fails, an ERR_PTR-encoded error is returned.
374   *
375   * For most cases the easiest way to create @exp_info is through the
376   * %DEFINE_DMA_BUF_EXPORT_INFO macro.
377   */
378  struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
379  {
380  	struct dma_buf *dmabuf;
381  	struct reservation_object *resv = exp_info->resv;
382  	struct file *file;
383  	size_t alloc_size = sizeof(struct dma_buf);
384  	int ret;
385  
386  	if (!exp_info->resv)
387  		alloc_size += sizeof(struct reservation_object);
388  	else
389  		/* prevent &dma_buf[1] == dma_buf->resv */
390  		alloc_size += 1;
391  
392  	if (WARN_ON(!exp_info->priv
393  			  || !exp_info->ops
394  			  || !exp_info->ops->map_dma_buf
395  			  || !exp_info->ops->unmap_dma_buf
396  			  || !exp_info->ops->release
397  			  || !exp_info->ops->mmap)) {
398  		return ERR_PTR(-EINVAL);
399  	}
400  
401  	if (!try_module_get(exp_info->owner))
402  		return ERR_PTR(-ENOENT);
403  
404  	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
405  	if (!dmabuf) {
406  		ret = -ENOMEM;
407  		goto err_module;
408  	}
409  
410  	dmabuf->priv = exp_info->priv;
411  	dmabuf->ops = exp_info->ops;
412  	dmabuf->size = exp_info->size;
413  	dmabuf->exp_name = exp_info->exp_name;
414  	dmabuf->owner = exp_info->owner;
415  	init_waitqueue_head(&dmabuf->poll);
416  	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
417  	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
418  
419  	if (!resv) {
420  		resv = (struct reservation_object *)&dmabuf[1];
421  		reservation_object_init(resv);
422  	}
423  	dmabuf->resv = resv;
424  
425  	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
426  					exp_info->flags);
427  	if (IS_ERR(file)) {
428  		ret = PTR_ERR(file);
429  		goto err_dmabuf;
430  	}
431  
432  	file->f_mode |= FMODE_LSEEK;
433  	dmabuf->file = file;
434  
435  	mutex_init(&dmabuf->lock);
436  	INIT_LIST_HEAD(&dmabuf->attachments);
437  
438  	mutex_lock(&db_list.lock);
439  	list_add(&dmabuf->list_node, &db_list.head);
440  	mutex_unlock(&db_list.lock);
441  
442  	return dmabuf;
443  
444  err_dmabuf:
445  	kfree(dmabuf);
446  err_module:
447  	module_put(exp_info->owner);
448  	return ERR_PTR(ret);
449  }
450  EXPORT_SYMBOL_GPL(dma_buf_export);
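/*
 * A hedged exporter-side sketch of the sequence dma_buf_export() expects.
 * my_buf and my_dmabuf_ops are driver-private placeholders, not part of this
 * file; DEFINE_DMA_BUF_EXPORT_INFO() fills in exp_name and owner::
 *
 *     int my_export_to_fd(struct my_buf *buf, int flags)
 *     {
 *             DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *             struct dma_buf *dmabuf;
 *             int fd;
 *
 *             exp_info.ops = &my_dmabuf_ops;
 *             exp_info.size = buf->size;
 *             exp_info.flags = flags;
 *             exp_info.priv = buf;
 *
 *             dmabuf = dma_buf_export(&exp_info);
 *             if (IS_ERR(dmabuf))
 *                     return PTR_ERR(dmabuf);
 *
 *             fd = dma_buf_fd(dmabuf, flags);
 *             if (fd < 0)
 *                     dma_buf_put(dmabuf);
 *             return fd;
 *     }
 */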
451  
452  /**
453   * dma_buf_fd - returns a file descriptor for the given dma_buf
454   * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
455   * @flags:      [in]    flags to give to fd
456   *
457   * On success, returns an associated 'fd'. Else, returns error.
458   */
459  int dma_buf_fd(struct dma_buf *dmabuf, int flags)
460  {
461  	int fd;
462  
463  	if (!dmabuf || !dmabuf->file)
464  		return -EINVAL;
465  
466  	fd = get_unused_fd_flags(flags);
467  	if (fd < 0)
468  		return fd;
469  
470  	fd_install(fd, dmabuf->file);
471  
472  	return fd;
473  }
474  EXPORT_SYMBOL_GPL(dma_buf_fd);
475  
476  /**
477   * dma_buf_get - returns the dma_buf structure related to an fd
478   * @fd:	[in]	fd associated with the dma_buf to be returned
479   *
480   * On success, returns the dma_buf structure associated with an fd; uses
481   * file's refcounting done by fget to increase refcount. Returns ERR_PTR
482   * otherwise.
483   */
484  struct dma_buf *dma_buf_get(int fd)
485  {
486  	struct file *file;
487  
488  	file = fget(fd);
489  
490  	if (!file)
491  		return ERR_PTR(-EBADF);
492  
493  	if (!is_dma_buf_file(file)) {
494  		fput(file);
495  		return ERR_PTR(-EINVAL);
496  	}
497  
498  	return file->private_data;
499  }
500  EXPORT_SYMBOL_GPL(dma_buf_get);
501  
502  /**
503   * dma_buf_put - decreases refcount of the buffer
504   * @dmabuf:	[in]	buffer to reduce refcount of
505   *
506   * Uses file's refcounting done implicitly by fput().
507   *
508   * If, as a result of this call, the refcount becomes 0, the 'release' file
509   * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
510   * in turn, and frees the memory allocated for dmabuf when exported.
511   */
512  void dma_buf_put(struct dma_buf *dmabuf)
513  {
514  	if (WARN_ON(!dmabuf || !dmabuf->file))
515  		return;
516  
517  	fput(dmabuf->file);
518  }
519  EXPORT_SYMBOL_GPL(dma_buf_put);
520  
521  /**
522   * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
523   * calls attach() of dma_buf_ops to allow device-specific attach functionality
524   * @dmabuf:	[in]	buffer to attach device to.
525   * @dev:	[in]	device to be attached.
526   *
527   * Returns struct dma_buf_attachment pointer for this attachment. Attachments
528   * must be cleaned up by calling dma_buf_detach().
529   *
530   * Returns:
531   *
532   * A pointer to newly created &dma_buf_attachment on success, or a negative
533   * error code wrapped into a pointer on failure.
534   *
535   * Note that this can fail if the backing storage of @dmabuf is in a place not
536   * accessible to @dev, and cannot be moved to a more suitable place. This is
537   * indicated with the error code -EBUSY.
538   */
539  struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
540  					  struct device *dev)
541  {
542  	struct dma_buf_attachment *attach;
543  	int ret;
544  
545  	if (WARN_ON(!dmabuf || !dev))
546  		return ERR_PTR(-EINVAL);
547  
548  	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
549  	if (!attach)
550  		return ERR_PTR(-ENOMEM);
551  
552  	attach->dev = dev;
553  	attach->dmabuf = dmabuf;
554  
555  	mutex_lock(&dmabuf->lock);
556  
557  	if (dmabuf->ops->attach) {
558  		ret = dmabuf->ops->attach(dmabuf, attach);
559  		if (ret)
560  			goto err_attach;
561  	}
562  	list_add(&attach->node, &dmabuf->attachments);
563  
564  	mutex_unlock(&dmabuf->lock);
565  	return attach;
566  
567  err_attach:
568  	kfree(attach);
569  	mutex_unlock(&dmabuf->lock);
570  	return ERR_PTR(ret);
571  }
572  EXPORT_SYMBOL_GPL(dma_buf_attach);
573  
574  /**
575   * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
576   * optionally calls detach() of dma_buf_ops for device-specific detach
577   * @dmabuf:	[in]	buffer to detach from.
578   * @attach:	[in]	attachment to be detached; it is freed by this call.
579   *
580   * Clean up a device attachment obtained by calling dma_buf_attach().
581   */
582  void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
583  {
584  	if (WARN_ON(!dmabuf || !attach))
585  		return;
586  
587  	mutex_lock(&dmabuf->lock);
588  	list_del(&attach->node);
589  	if (dmabuf->ops->detach)
590  		dmabuf->ops->detach(dmabuf, attach);
591  
592  	mutex_unlock(&dmabuf->lock);
593  	kfree(attach);
594  }
595  EXPORT_SYMBOL_GPL(dma_buf_detach);
596  
597  /**
598   * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
599   * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
600   * dma_buf_ops.
601   * @attach:	[in]	attachment whose scatterlist is to be returned
602   * @direction:	[in]	direction of DMA transfer
603   *
604   * Returns an sg_table containing the scatterlist of the buffer; returns ERR_PTR
605   * on error. May return -EINTR if it is interrupted by a signal.
606   *
607   * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
608   * the underlying backing storage is pinned for as long as a mapping exists,
609   * therefore users/importers should not hold onto a mapping for undue amounts of
610   * time.
611   */
612  struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
613  					enum dma_data_direction direction)
614  {
615  	struct sg_table *sg_table;
616  
617  	might_sleep();
618  
619  	if (WARN_ON(!attach || !attach->dmabuf))
620  		return ERR_PTR(-EINVAL);
621  
622  	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
623  	if (!sg_table)
624  		sg_table = ERR_PTR(-ENOMEM);
625  
626  	return sg_table;
627  }
628  EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
629  
630  /**
631   * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
632   * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
633   * dma_buf_ops.
634   * @attach:	[in]	attachment to unmap buffer from
635   * @sg_table:	[in]	scatterlist info of the buffer to unmap
636   * @direction:  [in]    direction of DMA transfer
637   *
638   * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
639   */
640  void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
641  				struct sg_table *sg_table,
642  				enum dma_data_direction direction)
643  {
644  	might_sleep();
645  
646  	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
647  		return;
648  
649  	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
650  						direction);
651  }
652  EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
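/*
 * The teardown counterpart to the importer sketch above (step 4 of the
 * device-access DOC): release resources in the reverse order they were
 * acquired. sgt, attach and dmabuf are assumed to be the values returned by
 * dma_buf_map_attachment(), dma_buf_attach() and dma_buf_get()::
 *
 *     dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *     dma_buf_detach(dmabuf, attach);
 *     dma_buf_put(dmabuf);
 */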
653  
654  /**
655   * DOC: cpu access
656   *
657   * There are multiple reasons for supporting CPU access to a dma buffer object:
658   *
659   * - Fallback operations in the kernel, for example when a device is connected
660   *   over USB and the kernel needs to shuffle the data around first before
661   *   sending it away. Cache coherency is handled by braketing any transactions
662   *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
663   *   access.
664   *
665   *   To support dma_buf objects residing in highmem, cpu access is page-based
666   *   using an API similar to kmap. Accessing a dma_buf is done in aligned chunks
667   *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
668   *   returns a pointer in kernel virtual address space. Afterwards the chunk
669   *   needs to be unmapped again. There is no limit on how often a given chunk
670   *   can be mapped and unmapped, i.e. the importer does not need to call
671   *   begin_cpu_access again before mapping the same chunk again.
672   *
673   *   Interfaces::
674   *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
675   *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
676   *
677   *   Implementing these functions is optional for exporters; for importers, all
678   *   the restrictions of using kmap apply.
679   *
680   *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
681   *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
682   *   the partial chunks at the beginning and end but may return stale or bogus
683   *   data outside of the range (in these partial chunks).
684   *
685   *   For cases where the overhead of kmap is too high, a vmap interface is
686   *   provided. This interface should be used very carefully, as vmalloc
687   *   space is a limited resource on many architectures.
688   *
689   *   Interfaces::
690   *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
691   *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
692   *
693   *   The vmap call can fail if there is no vmap support in the exporter, or if
694   *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
695   *   that the dma-buf layer keeps a reference count for all vmap access and
696   *   calls down into the exporter's vmap function only when no vmapping exists,
697   *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
698   *   provided by taking the dma_buf->lock mutex.
699   *
700   * - For full compatibility on the importer side with existing userspace
701   *   interfaces, which might already support mmap'ing buffers. This is needed in
702   *   many processing pipelines (e.g. feeding a software rendered image into a
703   *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
704   *   framework already supported this, and for DMA buffer file descriptors to
705   *   replace ION buffers, mmap support was needed.
706   *
707   *   There are no special interfaces; userspace simply calls mmap on the dma-buf
708   *   fd. But as for CPU access, there's a need to bracket the actual access,
709   *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
710   *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
711   *   be restarted.
712   *
713   *   Some systems might need some sort of cache coherency management e.g. when
714   *   CPU and GPU domains are being accessed through dma-buf at the same time.
715   *   To circumvent this problem there are begin/end coherency markers that
716   *   forward directly to the existing dma-buf device drivers' vfunc hooks.
717   *   Userspace can make use of those markers through the DMA_BUF_IOCTL_SYNC
718   *   ioctl. The sequence would be used as follows:
719   *
720   *     - mmap dma-buf fd
721   *     - for each CPU drawing/upload cycle: 1. SYNC_START ioctl, 2. read/write
722   *       to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
723   *       want (with the new data being consumed by, say, the GPU or the scanout
724   *       device)
725   *     - munmap once you don't need the buffer any more
726   *
727   *    For correctness and optimal performance, it is always required to use
728   *    SYNC_START and SYNC_END before and after, respectively, when accessing the
729   *    mapped address. Userspace cannot rely on coherent access, even when there
730   *    are systems where it just works without calling these ioctls.
731   *
732   * - And as a CPU fallback in userspace processing pipelines.
733   *
734   *   Similar to the motivation for kernel cpu access it is again important that
735   *   the userspace code of a given importing subsystem can use the same
736   *   interfaces with an imported dma-buf buffer object as with a native buffer
737   *   object. This is especially important for drm where the userspace part of
738   *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
739   *   use a different way to mmap a buffer would be rather invasive.
740   *
741   *   The assumption in the current dma-buf interfaces is that redirecting the
742   *   initial mmap is all that's needed. A survey of some of the existing
743   *   subsystems shows that no driver seems to do any nefarious thing like
744   *   syncing up with outstanding asynchronous processing on the device or
745   *   allocating special resources at fault time. So hopefully this is good
746   *   enough, since adding interfaces to intercept pagefaults and allow pte
747   *   shootdowns would increase the complexity quite a bit.
748   *
749   *   Interface::
750   *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
751   *		       unsigned long);
752   *
753   *   If the importing subsystem simply provides a special-purpose mmap call to
754   *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
755   *   equally achieve that for a dma-buf object.
756   */
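/*
 * A userspace sketch of the mmap plus DMA_BUF_IOCTL_SYNC bracketing described
 * above. dmabuf_fd and len are assumed; production code must also retry the
 * ioctl when it fails with EAGAIN or EINTR::
 *
 *     #include <string.h>
 *     #include <sys/mman.h>
 *     #include <sys/ioctl.h>
 *     #include <linux/dma-buf.h>
 *
 *     int dmabuf_cpu_fill(int dmabuf_fd, size_t len, unsigned char pattern)
 *     {
 *             struct dma_buf_sync sync = { 0 };
 *             void *ptr;
 *
 *             ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                        dmabuf_fd, 0);
 *             if (ptr == MAP_FAILED)
 *                     return -1;
 *
 *             sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *             ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *             memset(ptr, pattern, len);
 *
 *             sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *             ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *             return munmap(ptr, len);
 *     }
 */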
757  
758  static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
759  				      enum dma_data_direction direction)
760  {
761  	bool write = (direction == DMA_BIDIRECTIONAL ||
762  		      direction == DMA_TO_DEVICE);
763  	struct reservation_object *resv = dmabuf->resv;
764  	long ret;
765  
766  	/* Wait on any implicit rendering fences */
767  	ret = reservation_object_wait_timeout_rcu(resv, write, true,
768  						  MAX_SCHEDULE_TIMEOUT);
769  	if (ret < 0)
770  		return ret;
771  
772  	return 0;
773  }
774  
775  /**
776   * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
777   * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
778   * preparations. Coherency is only guaranteed for the specified access
779   * direction.
780   * @dmabuf:	[in]	buffer to prepare cpu access for.
781   * @direction:	[in]	direction of access, for coherency purposes.
782   *
783   * After the cpu access is complete the caller should call
784   * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
785   * it guaranteed to be coherent with other DMA access.
786   *
787   * Can return negative error values, returns 0 on success.
788   */
789  int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
790  			     enum dma_data_direction direction)
791  {
792  	int ret = 0;
793  
794  	if (WARN_ON(!dmabuf))
795  		return -EINVAL;
796  
797  	if (dmabuf->ops->begin_cpu_access)
798  		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
799  
800  	/* Ensure that all fences are waited upon - but we first allow
801  	 * the native handler the chance to do so more efficiently if it
802  	 * chooses. A double invocation here will be a reasonably cheap no-op.
803  	 */
804  	if (ret == 0)
805  		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
806  
807  	return ret;
808  }
809  EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
810  
811  /**
812   * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
813   * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
814   * actions. Coherency is only guaranteed for the specified access
815   * direction.
816   * @dmabuf:	[in]	buffer to complete cpu access for.
817   * @direction:	[in]	direction of access, for coherency purposes.
818   *
819   * This terminates CPU access started with dma_buf_begin_cpu_access().
820   *
821   * Can return negative error values, returns 0 on success.
822   */
823  int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
824  			   enum dma_data_direction direction)
825  {
826  	int ret = 0;
827  
828  	WARN_ON(!dmabuf);
829  
830  	if (dmabuf->ops->end_cpu_access)
831  		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
832  
833  	return ret;
834  }
835  EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
836  
837  /**
838   * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
839   * same restrictions as for kmap and friends apply.
840   * @dmabuf:	[in]	buffer to map page from.
841   * @page_num:	[in]	page in PAGE_SIZE units to map.
842   *
843   * This call must always succeed, any necessary preparations that might fail
844   * need to be done in begin_cpu_access.
845   */
846  void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
847  {
848  	WARN_ON(!dmabuf);
849  
850  	if (!dmabuf->ops->map)
851  		return NULL;
852  	return dmabuf->ops->map(dmabuf, page_num);
853  }
854  EXPORT_SYMBOL_GPL(dma_buf_kmap);
855  
856  /**
857   * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
858   * @dmabuf:	[in]	buffer to unmap page from.
859   * @page_num:	[in]	page in PAGE_SIZE units to unmap.
860   * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
861   *
862   * This call must always succeed.
863   */
864  void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
865  		    void *vaddr)
866  {
867  	WARN_ON(!dmabuf);
868  
869  	if (dmabuf->ops->unmap)
870  		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
871  }
872  EXPORT_SYMBOL_GPL(dma_buf_kunmap);
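/*
 * A kernel-side sketch of bracketed CPU access using the helpers above.
 * dmabuf is assumed to be a buffer the caller already holds a reference on;
 * dma_buf_kmap() may return NULL when the exporter does not implement map::
 *
 *     static int cpu_read_first_page(struct dma_buf *dmabuf, void *out)
 *     {
 *             void *vaddr;
 *             int ret;
 *
 *             ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *             if (ret)
 *                     return ret;
 *
 *             vaddr = dma_buf_kmap(dmabuf, 0);
 *             if (vaddr) {
 *                     memcpy(out, vaddr, PAGE_SIZE);
 *                     dma_buf_kunmap(dmabuf, 0, vaddr);
 *             } else {
 *                     ret = -ENOMEM;
 *             }
 *
 *             dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *             return ret;
 *     }
 */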
873  
874  
875  /**
876   * dma_buf_mmap - Set up a userspace mmap with the given vma
877   * @dmabuf:	[in]	buffer that should back the vma
878   * @vma:	[in]	vma for the mmap
879   * @pgoff:	[in]	offset in pages where this mmap should start within the
880   *			dma-buf buffer.
881   *
882   * This function adjusts the passed in vma so that it points at the file of the
883   * dma_buf operation. It also adjusts the starting pgoff and does bounds
884   * checking on the size of the vma. Then it calls the exporter's mmap function to
885   * set up the mapping.
886   *
887   * Can return negative error values, returns 0 on success.
888   */
889  int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
890  		 unsigned long pgoff)
891  {
892  	struct file *oldfile;
893  	int ret;
894  
895  	if (WARN_ON(!dmabuf || !vma))
896  		return -EINVAL;
897  
898  	/* check for offset overflow */
899  	if (pgoff + vma_pages(vma) < pgoff)
900  		return -EOVERFLOW;
901  
902  	/* check for overflowing the buffer's size */
903  	if (pgoff + vma_pages(vma) >
904  	    dmabuf->size >> PAGE_SHIFT)
905  		return -EINVAL;
906  
907  	/* readjust the vma */
908  	get_file(dmabuf->file);
909  	oldfile = vma->vm_file;
910  	vma->vm_file = dmabuf->file;
911  	vma->vm_pgoff = pgoff;
912  
913  	ret = dmabuf->ops->mmap(dmabuf, vma);
914  	if (ret) {
915  		/* restore old parameters on failure */
916  		vma->vm_file = oldfile;
917  		fput(dmabuf->file);
918  	} else {
919  		if (oldfile)
920  			fput(oldfile);
921  	}
922  	return ret;
923  
924  }
925  EXPORT_SYMBOL_GPL(dma_buf_mmap);
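/*
 * An importer that exposes a dma-buf backed object through its own mmap file
 * operation can simply forward to dma_buf_mmap(). This is a sketch; the
 * my_drv_lookup_dmabuf() helper is hypothetical and stands in for however the
 * driver resolves the vma to a buffer::
 *
 *     static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *     {
 *             struct dma_buf *dmabuf = my_drv_lookup_dmabuf(file, vma);
 *
 *             if (!dmabuf)
 *                     return -EINVAL;
 *
 *             return dma_buf_mmap(dmabuf, vma, vma->vm_pgoff);
 *     }
 */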
926  
927  /**
928   * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
929   * address space. Same restrictions as for vmap and friends apply.
930   * @dmabuf:	[in]	buffer to vmap
931   *
932   * This call may fail due to lack of virtual mapping address space.
933   * These calls are optional in drivers. The intended use for them
934   * is to map objects linearly in kernel space for frequently used objects.
935   * Please attempt to use kmap/kunmap before thinking about these interfaces.
936   *
937   * Returns NULL on error.
938   */
939  void *dma_buf_vmap(struct dma_buf *dmabuf)
940  {
941  	void *ptr;
942  
943  	if (WARN_ON(!dmabuf))
944  		return NULL;
945  
946  	if (!dmabuf->ops->vmap)
947  		return NULL;
948  
949  	mutex_lock(&dmabuf->lock);
950  	if (dmabuf->vmapping_counter) {
951  		dmabuf->vmapping_counter++;
952  		BUG_ON(!dmabuf->vmap_ptr);
953  		ptr = dmabuf->vmap_ptr;
954  		goto out_unlock;
955  	}
956  
957  	BUG_ON(dmabuf->vmap_ptr);
958  
959  	ptr = dmabuf->ops->vmap(dmabuf);
960  	if (WARN_ON_ONCE(IS_ERR(ptr)))
961  		ptr = NULL;
962  	if (!ptr)
963  		goto out_unlock;
964  
965  	dmabuf->vmap_ptr = ptr;
966  	dmabuf->vmapping_counter = 1;
967  
968  out_unlock:
969  	mutex_unlock(&dmabuf->lock);
970  	return ptr;
971  }
972  EXPORT_SYMBOL_GPL(dma_buf_vmap);
973  
974  /**
975   * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
976   * @dmabuf:	[in]	buffer to vunmap
977   * @vaddr:	[in]	vmap to vunmap
978   */
979  void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
980  {
981  	if (WARN_ON(!dmabuf))
982  		return;
983  
984  	BUG_ON(!dmabuf->vmap_ptr);
985  	BUG_ON(dmabuf->vmapping_counter == 0);
986  	BUG_ON(dmabuf->vmap_ptr != vaddr);
987  
988  	mutex_lock(&dmabuf->lock);
989  	if (--dmabuf->vmapping_counter == 0) {
990  		if (dmabuf->ops->vunmap)
991  			dmabuf->ops->vunmap(dmabuf, vaddr);
992  		dmabuf->vmap_ptr = NULL;
993  	}
994  	mutex_unlock(&dmabuf->lock);
995  }
996  EXPORT_SYMBOL_GPL(dma_buf_vunmap);
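/*
 * A sketch of the "fall back to kmap" advice from the CPU access DOC above:
 * try a persistent vmap first and, if the exporter cannot provide one, map
 * only the page that is actually needed. The usual begin/end_cpu_access
 * bracketing still applies around any access through the returned pointer;
 * page_of_interest is an assumed caller-provided page index::
 *
 *     static void *map_for_cpu(struct dma_buf *dmabuf,
 *                              unsigned long page_of_interest,
 *                              bool *is_vmap)
 *     {
 *             void *vaddr = dma_buf_vmap(dmabuf);
 *
 *             *is_vmap = vaddr != NULL;
 *             if (vaddr)
 *                     return vaddr;
 *
 *             return dma_buf_kmap(dmabuf, page_of_interest);
 *     }
 */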
997  
998  #ifdef CONFIG_DEBUG_FS
999  static int dma_buf_debug_show(struct seq_file *s, void *unused)
1000  {
1001  	int ret;
1002  	struct dma_buf *buf_obj;
1003  	struct dma_buf_attachment *attach_obj;
1004  	struct reservation_object *robj;
1005  	struct reservation_object_list *fobj;
1006  	struct dma_fence *fence;
1007  	unsigned seq;
1008  	int count = 0, attach_count, shared_count, i;
1009  	size_t size = 0;
1010  
1011  	ret = mutex_lock_interruptible(&db_list.lock);
1012  
1013  	if (ret)
1014  		return ret;
1015  
1016  	seq_puts(s, "\nDma-buf Objects:\n");
1017  	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
1018  		   "size", "flags", "mode", "count");
1019  
1020  	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1021  		ret = mutex_lock_interruptible(&buf_obj->lock);
1022  
1023  		if (ret) {
1024  			seq_puts(s,
1025  				 "\tERROR locking buffer object: skipping\n");
1026  			continue;
1027  		}
1028  
1029  		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
1030  				buf_obj->size,
1031  				buf_obj->file->f_flags, buf_obj->file->f_mode,
1032  				file_count(buf_obj->file),
1033  				buf_obj->exp_name);
1034  
1035  		robj = buf_obj->resv;
1036  		while (true) {
1037  			seq = read_seqcount_begin(&robj->seq);
1038  			rcu_read_lock();
1039  			fobj = rcu_dereference(robj->fence);
1040  			shared_count = fobj ? fobj->shared_count : 0;
1041  			fence = rcu_dereference(robj->fence_excl);
1042  			if (!read_seqcount_retry(&robj->seq, seq))
1043  				break;
1044  			rcu_read_unlock();
1045  		}
1046  
1047  		if (fence)
1048  			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1049  				   fence->ops->get_driver_name(fence),
1050  				   fence->ops->get_timeline_name(fence),
1051  				   dma_fence_is_signaled(fence) ? "" : "un");
1052  		for (i = 0; i < shared_count; i++) {
1053  			fence = rcu_dereference(fobj->shared[i]);
1054  			if (!dma_fence_get_rcu(fence))
1055  				continue;
1056  			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1057  				   fence->ops->get_driver_name(fence),
1058  				   fence->ops->get_timeline_name(fence),
1059  				   dma_fence_is_signaled(fence) ? "" : "un");
1060  		}
1061  		rcu_read_unlock();
1062  
1063  		seq_puts(s, "\tAttached Devices:\n");
1064  		attach_count = 0;
1065  
1066  		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1067  			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1068  			attach_count++;
1069  		}
1070  
1071  		seq_printf(s, "Total %d devices attached\n\n",
1072  				attach_count);
1073  
1074  		count++;
1075  		size += buf_obj->size;
1076  		mutex_unlock(&buf_obj->lock);
1077  	}
1078  
1079  	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1080  
1081  	mutex_unlock(&db_list.lock);
1082  	return 0;
1083  }
1084  
1085  DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1086  
1087  static struct dentry *dma_buf_debugfs_dir;
1088  
1089  static int dma_buf_init_debugfs(void)
1090  {
1091  	struct dentry *d;
1092  	int err = 0;
1093  
1094  	d = debugfs_create_dir("dma_buf", NULL);
1095  	if (IS_ERR(d))
1096  		return PTR_ERR(d);
1097  
1098  	dma_buf_debugfs_dir = d;
1099  
1100  	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1101  				NULL, &dma_buf_debug_fops);
1102  	if (IS_ERR(d)) {
1103  		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1104  		debugfs_remove_recursive(dma_buf_debugfs_dir);
1105  		dma_buf_debugfs_dir = NULL;
1106  		err = PTR_ERR(d);
1107  	}
1108  
1109  	return err;
1110  }
1111  
1112  static void dma_buf_uninit_debugfs(void)
1113  {
1114  	debugfs_remove_recursive(dma_buf_debugfs_dir);
1115  }
1116  #else
1117  static inline int dma_buf_init_debugfs(void)
1118  {
1119  	return 0;
1120  }
1121  static inline void dma_buf_uninit_debugfs(void)
1122  {
1123  }
1124  #endif
1125  
1126  static int __init dma_buf_init(void)
1127  {
1128  	mutex_init(&db_list.lock);
1129  	INIT_LIST_HEAD(&db_list.head);
1130  	dma_buf_init_debugfs();
1131  	return 0;
1132  }
1133  subsys_initcall(dma_buf_init);
1134  
1135  static void __exit dma_buf_deinit(void)
1136  {
1137  	dma_buf_uninit_debugfs();
1138  }
1139  __exitcall(dma_buf_deinit);
1140