/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @map_atomic: maps a page from the buffer into kernel address
 *		space, users may not block until the subsequent unmap call.
 *		This callback must not sleep.
 * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *		This callback must not sleep.
 * @map: maps a page from the buffer into kernel address space.
 * @unmap: [optional] unmaps a page from the buffer.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	/**
	 * @attach:
	 *
	 * This is called from dma_buf_attach() to make sure that a given
	 * &device can access the provided &dma_buf. Exporters which support
	 * buffer objects in special locations like VRAM or device-specific
	 * carveout areas should check whether the buffer could be moved to
	 * system memory (or directly accessed by the provided device), and
	 * otherwise need to fail the attach operation.
	 *
	 * The exporter should also in general check whether the current
	 * allocation fulfills the DMA constraints of the new device. If this
	 * is not the case, and the allocation cannot be moved, it should also
	 * fail the attach operation.
	 *
	 * Any exporter-private housekeeping data can be stored in the
	 * &dma_buf_attachment.priv pointer.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success, negative error code on failure. It might return -EBUSY
	 * to signal that backing storage is already allocated and incompatible
	 * with the requirements of the requesting device.
	 */
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	/**
	 * @detach:
	 *
	 * This is called by dma_buf_detach() to release a &dma_buf_attachment.
	 * Provided so that exporters can clean up any housekeeping for a
	 * &dma_buf_attachment.
	 *
	 * This callback is optional.
	 */
	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/**
	 * @map_dma_buf:
	 *
	 * This is called by dma_buf_map_attachment() and is used to map a
	 * shared &dma_buf into device address space, and it is mandatory. It
	 * can only be called if @attach has been called successfully. This
	 * essentially pins the DMA buffer into place, and it cannot be moved
	 * any more.
	 *
	 * This call may sleep, e.g. when the backing storage first needs to be
	 * allocated, or moved to a location suitable for all currently attached
	 * devices.
	 *
	 * Note that any specific buffer attributes required for this function
	 * should get added to device_dma_parameters accessible via
	 * &device.dma_params from the &dma_buf_attachment. The @attach callback
	 * should also check these constraints.
	 *
	 * If this is being called for the first time, the exporter can now
	 * choose to scan through the list of attachments for this buffer,
	 * collate the requirements of the attached devices, and choose an
	 * appropriate backing storage for the buffer.
	 *
	 * Based on enum dma_data_direction, it might be possible to have
	 * multiple users accessing at the same time (for reading, maybe), or
	 * any other kind of sharing that the exporter might wish to make
	 * available to buffer-users.
	 *
	 * Returns:
	 *
	 * A &sg_table scatter list of the backing storage of the DMA buffer,
	 * already mapped into the device address space of the &device attached
	 * with the provided &dma_buf_attachment.
	 *
	 * On failure, returns a negative error value wrapped into a pointer.
	 * May also return -EINTR when a signal was received while being
	 * blocked.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	/**
	 * @unmap_dma_buf:
	 *
	 * This is called by dma_buf_unmap_attachment() and should unmap and
	 * release the &sg_table allocated in @map_dma_buf, and it is mandatory.
	 * It should also unpin the backing storage if this is the last mapping
	 * of the DMA buffer, if the exporter supports backing storage
	 * migration.
	 */
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);

	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/**
	 * @release:
	 *
	 * Called after the last dma_buf_put to release the &dma_buf, and
	 * mandatory.
	 */
	void (*release)(struct dma_buf *);

	/**
	 * @begin_cpu_access:
	 *
	 * This is called from dma_buf_begin_cpu_access() and allows the
	 * exporter to ensure that the memory is actually available for cpu
	 * access - the exporter might need to allocate or swap-in and pin the
	 * backing storage. The exporter also needs to ensure that cpu access is
	 * coherent for the access direction. The direction can be used by the
	 * exporter to optimize the cache flushing, i.e. access with a different
	 * direction (read instead of write) might return stale or even bogus
	 * data (e.g. when the exporter needs to copy the data to temporary
	 * storage).
	 *
	 * This callback is optional.
	 *
	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
	 * from userspace (where storage shouldn't be pinned to avoid handing
	 * de-facto mlock rights to userspace) and for the kernel-internal
	 * users of the various kmap interfaces, where the backing storage must
	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
	 * there are no in-kernel users of the kmap interfaces yet, this isn't a
	 * real problem.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. This can for
	 * example fail when the backing storage can't be allocated. Can also
	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
	 * needs to be restarted.
	 */
	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);

	/**
	 * @end_cpu_access:
	 *
	 * This is called from dma_buf_end_cpu_access() when the importer is
	 * done accessing the buffer from the CPU. The exporter can use this to
	 * flush caches and unpin any resources pinned in @begin_cpu_access.
	 * The result of any dma_buf kmap calls after end_cpu_access is
	 * undefined.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure. Can return
	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
	 * to be restarted.
	 */
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
	void *(*map_atomic)(struct dma_buf *, unsigned long);
	void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*map)(struct dma_buf *, unsigned long);
	void (*unmap)(struct dma_buf *, unsigned long, void *);

	/**
	 * @mmap:
	 *
	 * This callback is used by the dma_buf_mmap() function.
	 *
	 * Note that the mapping needs to be incoherent; userspace is expected
	 * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface.
	 *
	 * Because dma-buf buffers have invariant size over their lifetime, the
	 * dma-buf core checks whether a vma is too large and rejects such
	 * mappings. The exporter hence does not need to duplicate this check.
	 *
	 * If an exporter needs to manually flush caches and hence needs to fake
	 * coherency for mmap support, it needs to be able to zap all the ptes
	 * pointing at the backing storage. Now linux mm needs a struct
	 * address_space associated with the struct file stored in vma->vm_file
	 * to do that with the function unmap_mapping_range. But the dma_buf
	 * framework only backs every dma_buf fd with the anon_file struct file,
	 * i.e. all dma_bufs share the same file.
	 *
	 * Hence exporters need to setup their own file (and address_space)
	 * association by setting vma->vm_file and adjusting vma->vm_pgoff in
	 * the dma_buf mmap callback. In the specific case of a gem driver the
	 * exporter could use the shmem file already provided by gem (and set
	 * vm_pgoff = 0). Exporters can then zap ptes by unmapping the
	 * corresponding range of the struct address_space associated with their
	 * own file.
	 *
	 * This callback is optional.
	 *
	 * Returns:
	 *
	 * 0 on success or a negative error code on failure.
	 */
	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
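
/*
 * Example: a minimal exporter-side implementation of the mandatory hooks
 * above. This is an illustrative sketch only and not part of the API: the
 * names "struct my_buffer", "my_map_dma_buf", "my_unmap_dma_buf",
 * "my_release", "my_buffer_free" and "my_dma_buf_ops" are hypothetical, the
 * optional callbacks (@attach, @begin_cpu_access, @mmap, ...) are omitted,
 * and real exporters will differ in how they allocate and track their
 * backing storage (the code also assumes <linux/slab.h> in the .c file).
 *
 *	struct my_buffer {
 *		struct page **pages;
 *		unsigned int npages;
 *	};
 *
 *	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
 *					       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *		struct sg_table *sgt;
 *
 *		sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
 *		if (!sgt)
 *			return ERR_PTR(-ENOMEM);
 *
 *		if (sg_alloc_table_from_pages(sgt, buf->pages, buf->npages, 0,
 *					      (size_t)buf->npages << PAGE_SHIFT,
 *					      GFP_KERNEL)) {
 *			kfree(sgt);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *
 *		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir)) {
 *			sg_free_table(sgt);
 *			kfree(sgt);
 *			return ERR_PTR(-ENOMEM);
 *		}
 *		return sgt;
 *	}
 *
 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt,
 *				     enum dma_data_direction dir)
 *	{
 *		dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
 *		sg_free_table(sgt);
 *		kfree(sgt);
 *	}
 *
 *	static void my_release(struct dma_buf *dmabuf)
 *	{
 *		my_buffer_free(dmabuf->priv);
 *	}
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */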

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across devices and processes,
 *        and for refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *        vmap/unmap
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *         kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 *
 * This represents a shared buffer, created by calling dma_buf_export(). The
 * userspace representation is a normal file descriptor, which can be created
 * by calling dma_buf_fd().
 *
 * Shared dma buffers are reference counted using dma_buf_put() and
 * get_dma_buf().
 *
 * Device DMA access is handled by the separate &struct dma_buf_attachment.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct reservation_object *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct dma_fence_cb cb;
		wait_queue_head_t *poll;

		__poll_t active;
	} cb_excl, cb_shared;
};
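
/*
 * Example: exporting a buffer and handing it to userspace as a file
 * descriptor. Illustrative sketch only: "my_export", "my_dma_buf_ops" and
 * "struct my_buffer" are the hypothetical exporter pieces from the sketch
 * above; the flow itself only uses DEFINE_DMA_BUF_EXPORT_INFO(),
 * dma_buf_export(), dma_buf_fd() and dma_buf_put() as declared later in
 * this header.
 *
 *	static int my_export(struct my_buffer *buf, size_t size)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *		int fd;
 *
 *		exp_info.ops = &my_dma_buf_ops;
 *		exp_info.size = size;
 *		exp_info.flags = O_CLOEXEC;
 *		exp_info.priv = buf;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *		if (fd < 0)
 *			dma_buf_put(dmabuf);
 *		return fd;
 *	}
 */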

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 *
 * An attachment is created by calling dma_buf_attach(), and released again by
 * calling dma_buf_detach(). The DMA mapping itself needed to initiate a
 * transfer is created by dma_buf_map_attachment() and freed again by calling
 * dma_buf_unmap_attachment().
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};
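
/*
 * Example: importer-side usage following the lifecycle described above -
 * attach, map, perform DMA, unmap, detach. Illustrative sketch only:
 * "my_import" and the way the device is programmed are hypothetical, and
 * error unwinding is shortened for brevity. Kernel CPU access to the same
 * buffer would additionally be bracketed with dma_buf_begin_cpu_access()
 * and dma_buf_end_cpu_access().
 *
 *	static int my_import(struct device *dev, int fd)
 *	{
 *		struct dma_buf *dmabuf;
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *		int ret = 0;
 *
 *		dmabuf = dma_buf_get(fd);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, dev);
 *		if (IS_ERR(attach)) {
 *			ret = PTR_ERR(attach);
 *			goto out_put;
 *		}
 *
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *		if (IS_ERR(sgt)) {
 *			ret = PTR_ERR(sgt);
 *			goto out_detach;
 *		}
 *
 *		// program the device with the addresses in sgt, run the DMA
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	out_detach:
 *		dma_buf_detach(dmabuf, attach);
 *	out_put:
 *		dma_buf_put(dmabuf);
 *		return ret;
 *	}
 */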

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct reservation_object *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
 * @name: export-info name
 *
 * DEFINE_DMA_BUF_EXPORT_INFO macro defines the &struct dma_buf_export_info,
 * zeroes it out and pre-populates exp_name in it.
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					    .owner = THIS_MODULE }

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed for drivers that
 * need to create additional references to the dma-buf on the kernel side.
 * For example, an exporter that needs to keep a dmabuf ptr so that
 * subsequent exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */