/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/dma-buf-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional and should only be used in limited use
	 * cases like scanout and not for temporary pin operations.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that
	 * the DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment. The addresses and lengths in
	 * the scatter list are PAGE_SIZE aligned.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
	 * if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet, this isn't
	 * a real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer with the CPU. The exporter can use this to
	 * flush caches and unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent, userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the Linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
	void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
};
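
/*
 * Example: a minimal exporter sketch. This is purely illustrative and not
 * taken from any in-tree driver; the my_buffer type, its pages/nr_pages
 * fields and my_buffer_put() are hypothetical. It shows how the mandatory
 * @map_dma_buf, @unmap_dma_buf and @release hooks are typically wired up for
 * an exporter whose backing storage is a plain page array:
 *
 *	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
 *					       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *		struct sg_table *sgt;
 *		int ret;
 *
 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *		if (!sgt)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = sg_alloc_table_from_pages(sgt, buf->pages, buf->nr_pages,
 *						0, buf->nr_pages << PAGE_SHIFT,
 *						GFP_KERNEL);
 *		if (ret)
 *			goto err_free;
 *
 *		ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
 *		if (ret)
 *			goto err_table;
 *
 *		return sgt;
 *
 *	err_table:
 *		sg_free_table(sgt);
 *	err_free:
 *		kfree(sgt);
 *		return ERR_PTR(ret);
 *	}
 *
 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt,
 *				     enum dma_data_direction dir)
 *	{
 *		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
 *		sg_free_table(sgt);
 *		kfree(sgt);
 *	}
 *
 *	static void my_release(struct dma_buf *dmabuf)
 *	{
 *		my_buffer_put(dmabuf->priv);
 *	}
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */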

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer; invariant over the lifetime of the buffer.
 * @file: file pointer used for sharing buffers across, and for refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached,
 *               protected by dma_resv lock.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @name: userspace-provided name; useful for accounting and debugging,
 *        protected by @resv.
 * @name_lock: spinlock to protect name access
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	struct dma_buf_map vmap_ptr;
	const char *exp_name;
	const char *name;
	spinlock_t name_lock;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct dma_resv *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);
};

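/*
 * Example: a dynamic importer advertises its ops when calling
 * dma_buf_dynamic_attach(). The sketch below is illustrative only; struct
 * my_importer and my_importer_invalidate_mappings() are made-up placeholders.
 * @move_notify merely marks the cached mapping as stale; the actual
 * unmap/remap happens later, with the dma_resv lock held:
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		my_importer_invalidate_mappings(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
 */
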
/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name:	name of the exporter - useful for debugging.
 * @owner:	pointer to exporter module - used for refcounting kernel module
 * @ops:	Attach allocator-defined dma buf ops to the new buffer
 * @size:	Size of the buffer - invariant over the lifetime of the buffer
 * @flags:	mode flags for the file
 * @resv:	reservation-object, NULL to allocate default one
 * @priv:	Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }

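/*
 * Example: a hedged sketch of the export path. The ops structure
 * (my_dma_buf_ops), the private pointer (my_buffer) and buf_size are
 * placeholders for whatever the exporter actually uses:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = buf_size;
 *	exp_info.flags = O_RDWR | O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0) {
 *		dma_buf_put(dmabuf);
 *		return fd;
 *	}
 */
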
/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed in case of drivers
 * that need to create additional references to the dmabuf on the kernel side.
 * For example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

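/*
 * Example: the "keep a dmabuf ptr" case mentioned above, sketched with a
 * hypothetical my_buffer that caches its dma_buf so repeated exports hand out
 * new references to the same buffer instead of creating a new one
 * (my_export_dma_buf() is a placeholder for the driver's export helper):
 *
 *	static struct dma_buf *my_get_or_export(struct my_buffer *buf)
 *	{
 *		if (buf->dmabuf) {
 *			get_dma_buf(buf->dmabuf);
 *			return buf->dmabuf;
 *		}
 *
 *		buf->dmabuf = my_export_dma_buf(buf);
 *		return buf->dmabuf;
 *	}
 *
 * Every reference taken this way must eventually be dropped with
 * dma_buf_put().
 */
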
/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions
 * with the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
				enum dma_data_direction);
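
/*
 * Example: the usual importer sequence around the declarations above, with
 * teardown in reverse order. fd, dev and my_program_device() are placeholders;
 * this is a sketch under those assumptions, not a drop-in implementation:
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *	int ret;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach)) {
 *		ret = PTR_ERR(attach);
 *		goto err_put;
 *	}
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		ret = PTR_ERR(sgt);
 *		goto err_detach;
 *	}
 *
 *	my_program_device(dev, sgt);
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 *	return 0;
 *
 *  err_detach:
 *	dma_buf_detach(dmabuf, attach);
 *  err_put:
 *	dma_buf_put(dmabuf);
 *	return ret;
 */
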
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
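
/*
 * Example: kernel CPU access should be bracketed by
 * dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(). A minimal sketch using
 * the vmap interface; my_parse_buffer() is a placeholder, and whether
 * map.is_iomem must be honoured depends on the exporter:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret) {
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		return ret;
 *	}
 *
 *	my_parse_buffer(&map);
 *
 *	dma_buf_vunmap(dmabuf, &map);
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */
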
#endif /* __DMA_BUF_H__ */