/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in the creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;
/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @dynamic_mapping:
	 *
	 * If true the framework makes sure that the map/unmap_dma_buf
	 * callbacks are always called with the dma_resv object locked.
	 *
	 * If false the framework makes sure that the map/unmap_dma_buf
	 * callbacks are always called without the dma_resv object locked.
	 * Mutually exclusive with @cache_sgt_mapping.
	 */
	bool dynamic_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
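
	/*
	 * Illustrative sketch only, not taken from any in-tree exporter: an
	 * exporter whose backing storage is already allocated and fixed might
	 * validate the new device's DMA mask in @attach. The my_attach name
	 * and my_buffer type are hypothetical.
	 *
	 *	static int my_attach(struct dma_buf *dmabuf,
	 *			     struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Fail if the device cannot address the allocation.
	 *		if (buf->dma_addr > dma_get_mask(attach->dev))
	 *			return -EBUSY;
	 *
	 *		return 0;
	 *	}
	 */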

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully. This
	 * essentially pins the DMA buffer into place, and it cannot be moved
	 * any more.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
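
	/*
	 * Illustrative sketch only, assuming a page-backed exporter; the
	 * my_map_dma_buf name and my_buffer type are hypothetical. It builds
	 * an sg_table over the backing pages and maps it for the attached
	 * device:
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *		struct sg_table *sgt;
	 *
	 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	 *		if (!sgt)
	 *			return ERR_PTR(-ENOMEM);
	 *
	 *		if (sg_alloc_table_from_pages(sgt, buf->pages,
	 *					      buf->nr_pages, 0,
	 *					      buf->nr_pages << PAGE_SHIFT,
	 *					      GFP_KERNEL))
	 *			goto free_sgt;
	 *
	 *		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir))
	 *			goto free_table;
	 *
	 *		return sgt;
	 *
	 *	free_table:
	 *		sg_free_table(sgt);
	 *	free_sgt:
	 *		kfree(sgt);
	 *		return ERR_PTR(-ENOMEM);
	 *	}
	 */
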
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
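
	/*
	 * Illustrative sketch only: an exporter whose buffer is mapped
	 * cacheable for the CPU but non-coherently for devices might bridge
	 * coherency here. The my_begin_cpu_access name and my_buffer type are
	 * hypothetical.
	 *
	 *	static int my_begin_cpu_access(struct dma_buf *dmabuf,
	 *				       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Make device writes visible to the CPU.
	 *		dma_sync_sg_for_cpu(buf->dev, buf->sgt->sgl,
	 *				    buf->sgt->nents, dir);
	 *		return 0;
	 *	}
	 */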

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer with the CPU. The exporter can use this to
	 * flush caches and unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the Linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
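
	/*
	 * Illustrative sketch only, assuming physically contiguous backing
	 * storage; my_mmap and my_buffer are hypothetical names. It simply
	 * maps the buffer's pages into the vma:
	 *
	 *	static int my_mmap(struct dma_buf *dmabuf,
	 *			   struct vm_area_struct *vma)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		return remap_pfn_range(vma, vma->vm_start, buf->pfn,
	 *				       vma->vm_end - vma->vm_start,
	 *				       vma->vm_page_prot);
	 *	}
	 */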

	/**
	 * @map:
	 *
	 * Maps a page from the buffer into kernel address space. The page is
	 * specified by offset into the buffer in PAGE_SIZE units.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * Virtual address pointer where requested page can be accessed. NULL
	 * on error or when this function is unimplemented by the exporter.
	 */
	void *(*map)(struct dma_buf *, unsigned long);

	/**
	 * @unmap:
	 *
	 * Unmaps a page from the buffer. The page offset and address pointer
	 * should be the same as the ones passed to and returned by the
	 * matching call to @map.
	 *
	 * This callback is optional.
	 */
	void (*unmap)(struct dma_buf *, unsigned long, void *);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
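
/*
 * Illustrative sketch only: an exporter typically wires its callbacks up in
 * a static ops table. All my_* names refer to the hypothetical sketches
 * above; my_unmap_dma_buf and my_release are likewise hypothetical.
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	  = my_map_dma_buf,
 *		.unmap_dma_buf	  = my_unmap_dma_buf,
 *		.release	  = my_release,
 *		.begin_cpu_access = my_begin_cpu_access,
 *		.mmap		  = my_mmap,
 *	};
 */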

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached,
 *               protected by dma_resv lock.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @name: userspace-provided name; useful for accounting and debugging,
 *        protected by @resv.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	const char *name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct dma_resv *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @priv: exporter specific attachment data.
 * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
 * dma_resv lock held.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool dynamic_mapping;
	void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name:	name of the exporter - useful for debugging.
 * @owner:	pointer to exporter module - used for refcounting the kernel
 *		module
 * @ops:	Attach allocator-defined dma buf ops to the new buffer
 * @size:	Size of the buffer
 * @flags:	mode flags for the file
 * @resv:	reservation-object, NULL to allocate default one
 * @priv:	Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }

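/*
 * Illustrative export sketch only (my_dma_buf_ops and buf are hypothetical,
 * error handling abbreviated):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */
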
/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For
 * example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return dmabuf->ops->dynamic_mapping;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions
 * with the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return attach->dynamic_mapping;
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       bool dynamic_mapping);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
				enum dma_data_direction);
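
/*
 * Illustrative importer-side sketch (error handling abbreviated): a driver
 * that receives a buffer as a file descriptor and wants to DMA to or from
 * it would typically do:
 *
 *	dmabuf = dma_buf_get(fd);
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
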
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
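
/*
 * Illustrative CPU-access sketch (error handling abbreviated): kernel-side
 * CPU access must be bracketed by the begin/end calls so the exporter can
 * keep caches coherent:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = dma_buf_vmap(dmabuf);
 *	// ... read the buffer contents through vaddr ...
 *	dma_buf_vunmap(dmabuf, vaddr);
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */
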
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */