/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @map: [optional] maps a page from the buffer into kernel address space.
 * @unmap: [optional] unmaps a page from the buffer.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer.
 */
struct dma_buf_ops {
	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
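
	/*
	 * A minimal sketch of an optional @attach implementation, assuming a
	 * hypothetical exporter whose buffers may be pinned in VRAM. The
	 * my_buffer type and dev_can_access_vram() helper are illustrative
	 * assumptions, not part of this API::
	 *
	 *	static int my_attach(struct dma_buf *dmabuf,
	 *			     struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		// Reject devices that cannot reach the current
	 *		// placement when the buffer cannot be migrated.
	 *		if (buf->pinned_in_vram &&
	 *		    !dev_can_access_vram(attach->dev))
	 *			return -EBUSY;
	 *
	 *		attach->priv = NULL;
	 *		return 0;
	 *	}
	 */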

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully. This
	 * essentially pins the DMA buffer into place, and it cannot be moved
	 * any more.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_parms from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and it is
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
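
	/*
	 * Importer-side sketch of bracketing CPU access with the wrappers
	 * declared at the end of this header (error handling trimmed; the
	 * direction is chosen for a CPU read-back of device-written data)::
	 *
	 *	if (dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE) == 0) {
	 *		// CPU reads of the backing storage are coherent here.
	 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	 *	}
	 */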

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer from the CPU. The exporter can use this to
	 * flush caches and unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
	void *(*map)(struct dma_buf *, unsigned long);
	void (*unmap)(struct dma_buf *, unsigned long, void *);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now the linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
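
/*
 * A minimal sketch of an exporter's ops table: per the documentation above,
 * only @map_dma_buf, @unmap_dma_buf and @release are mandatory; everything
 * else is optional. The my_* implementations are assumed to exist elsewhere::
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */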

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/vunmap.
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct reservation_object *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct reservation_object *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }
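
/*
 * A typical export flow, sketched under the assumption of a driver-private
 * my_buffer object 'buf' and the hypothetical my_dma_buf_ops table shown
 * earlier::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */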

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed for drivers that
 * create additional references to the dmabuf on the kernel side. For example,
 * an exporter that needs to keep a dmabuf ptr so that subsequent exports don't
 * create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */