/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/dma-buf-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @cache_sgt_mapping:
	 *
	 * If true the framework will cache the first mapping made for each
	 * attachment. This avoids creating mappings for attachments multiple
	 * times.
	 */
	bool cache_sgt_mapping;

	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
	 * which support buffer objects in special locations like VRAM or
	 * device-specific carveout areas should check whether the buffer could
	 * be moved to system memory (or directly accessed by the provided
	 * device), and otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
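
	/*
	 * Illustrative sketch only (not part of this header): an exporter
	 * whose buffer lives in one contiguous system-memory allocation
	 * might implement @attach roughly as below. The struct my_buffer
	 * type and its size field are made-up examples;
	 * dma_get_max_seg_size() is the real DMA-mapping helper::
	 *
	 *	static int my_attach(struct dma_buf *dmabuf,
	 *			     struct dma_buf_attachment *attach)
	 *	{
	 *		struct my_buffer *buf = dmabuf->priv;
	 *
	 *		if (buf->size > dma_get_max_seg_size(attach->dev))
	 *			return -EBUSY;
	 *
	 *		return 0;
	 *	}
	 */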

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for an
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @pin:
	 *
	 * This is called by dma_buf_pin() and lets the exporter know that the
	 * DMA-buf can't be moved any more. The exporter should pin the buffer
	 * into system memory to make sure it is generally accessible by other
	 * devices.
	 *
	 * This is called with the &dmabuf.resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This is called automatically for non-dynamic importers from
	 * dma_buf_attach().
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure.
	 */
	int (*pin)(struct dma_buf_attachment *attach);

	/**
	 * @unpin:
	 *
	 * This is called by dma_buf_unpin() and lets the exporter know that
	 * the DMA-buf can be moved again.
	 *
	 * This is called with the dmabuf->resv object locked and is mutually
	 * exclusive with @cache_sgt_mapping.
	 *
	 * This callback is optional.
	 */
	void (*unpin)(struct dma_buf_attachment *attach);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently
	 * attached devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * This is always called with the dmabuf->resv object locked when
	 * the dynamic_mapping flag is true.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment. The addresses and lengths in
	 * the scatter list are PAGE_SIZE aligned.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 *
	 * Note that exporters should not try to cache the scatter list, or
	 * return the same one for multiple calls. Caching is done either by
	 * the DMA-BUF code (for non-dynamic importers) or the importer.
	 * Ownership of the scatter list is transferred to the caller, and
	 * returned by @unmap_dma_buf.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * For static dma_buf handling this might also unpin the backing
	 * storage if this is the last mapping of the DMA buffer.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
	 * if the call would block.
	 */
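
	/*
	 * Illustrative sketch only: for a hypothetical exporter backed by a
	 * plain page array (struct my_buffer and its pages/nr_pages/size
	 * fields are assumptions), @map_dma_buf could build and map a
	 * scatter list like this::
	 *
	 *	static struct sg_table *
	 *	my_map_dma_buf(struct dma_buf_attachment *attach,
	 *		       enum dma_data_direction dir)
	 *	{
	 *		struct my_buffer *buf = attach->dmabuf->priv;
	 *		struct sg_table *sgt;
	 *		int ret;
	 *
	 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	 *		if (!sgt)
	 *			return ERR_PTR(-ENOMEM);
	 *
	 *		ret = sg_alloc_table_from_pages(sgt, buf->pages,
	 *						buf->nr_pages, 0,
	 *						buf->size, GFP_KERNEL);
	 *		if (ret)
	 *			goto err_free;
	 *
	 *		ret = dma_map_sgtable(attach->dev, sgt, dir, 0);
	 *		if (ret)
	 *			goto err_table;
	 *
	 *		return sgt;
	 *
	 *	err_table:
	 *		sg_free_table(sgt);
	 *	err_free:
	 *		kfree(sgt);
	 *		return ERR_PTR(ret);
	 *	}
	 */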

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually coherent for cpu
	 * access. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a
	 * different direction (read instead of write) might return stale or
	 * even bogus data (e.g. when the exporter needs to copy the data to
	 * temporary storage).
	 *
	 * Note that this is both called through the DMA_BUF_IOCTL_SYNC IOCTL
	 * command for userspace mappings established through @mmap, and also
	 * for kernel mappings established with @vmap.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done with CPU access to the buffer. The exporter can use this to
	 * flush caches and undo anything else done in @begin_cpu_access.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to
	 * fake coherency for mmap support, it needs to be able to zap all the
	 * ptes pointing at the backing storage. Now linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct
	 * file, i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to set up their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with
	 * their own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
	void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
};
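
/*
 * Illustrative sketch only: a minimal exporter wires its callbacks into an
 * ops table. The my_map_dma_buf() helper is the hypothetical example above;
 * my_unmap_dma_buf() and my_release() are assumed counterparts. Only
 * @map_dma_buf, @unmap_dma_buf and @release are mandatory::
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */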

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer; invariant over the lifetime of the buffer.
 * @file: file pointer used for sharing buffers across processes, and for
 *        refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached,
 *               protected by dma_resv lock.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @name: userspace-provided name; useful for accounting and debugging,
 *        protected by @resv.
 * @name_lock: spinlock to protect name access
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	struct dma_buf_map vmap_ptr;
	const char *exp_name;
	const char *name;
	spinlock_t name_lock;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct dma_resv *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @allow_peer2peer:
	 *
	 * If this is set to true the importer must be able to handle peer
	 * resources without struct pages.
	 */
	bool allow_peer2peer;

	/**
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exist.
	 *
	 * This callback is called with the lock of the reservation object
	 * associated with the dma_buf held and the mapping function must be
	 * called with this lock held as well. This makes sure that no mapping
	 * is created concurrently with an ongoing move operation.
	 *
	 * Mappings stay valid and are not directly affected by this callback.
	 * But the DMA-buf can now be in a different physical location, so all
	 * mappings should be destroyed and re-created as soon as possible.
	 *
	 * New mappings can be created after this callback returns, and will
	 * point to the new location of the DMA-buf.
	 */
	void (*move_notify)(struct dma_buf_attachment *attach);
};
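
/*
 * Illustrative sketch only: a dynamic importer provides a &struct
 * dma_buf_attach_ops so it receives move notifications instead of pinning
 * the buffer. The my_importer context and my_invalidate_mappings() helper
 * are assumptions for the example, as are the dmabuf/dev/imp variables in
 * the final call::
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		struct my_importer *imp = attach->importer_priv;
 *
 *		my_invalidate_mappings(imp);
 *	}
 *
 *	static const struct dma_buf_attach_ops my_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
 */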

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
 * @sgt: cached mapping.
 * @dir: direction of cached mapping.
 * @peer2peer: true if the importer can handle peer resources without pages.
 * @priv: exporter specific attachment data.
 * @importer_ops: importer operations for this attachment; if provided,
 *                dma_buf_map/unmap_attachment() must be called with the
 *                dma_resv lock held.
 * @importer_priv: importer specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	bool peer2peer;
	const struct dma_buf_attach_ops *importer_ops;
	void *importer_priv;
	void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer - invariant over the lifetime of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct dma_resv *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					 .owner = THIS_MODULE }
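
/*
 * Illustrative sketch only: a typical export path fills in a stack-allocated
 * export info and turns the resulting dma_buf into a file descriptor for
 * userspace. The my_dma_buf_ops table and buf object are assumptions carried
 * over from the earlier sketches; error handling for dma_buf_fd() is
 * elided::
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dma_buf_ops;
 *	exp_info.size = buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */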

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed in case of drivers
 * that need to create additional references to the dmabuf on the kernel side.
 * For example, an exporter that needs to keep a dmabuf ptr so that subsequent
 * exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

/**
 * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
 * @dmabuf: the DMA-buf to check
 *
 * Returns true if a DMA-buf exporter wants to be called with the dma_resv
 * locked for the map/unmap callbacks, false if it doesn't want to be called
 * with the lock held.
 */
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
	return !!dmabuf->ops->pin;
}

/**
 * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
 * mappings
 * @attach: the DMA-buf attachment to check
 *
 * Returns true if a DMA-buf importer wants to call the map/unmap functions
 * with the dma_resv lock held.
 */
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attach);
int dma_buf_pin(struct dma_buf_attachment *attach);
void dma_buf_unpin(struct dma_buf_attachment *attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
#endif /* __DMA_BUF_H__ */