/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/fence.h>
#include <linux/wait.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 *	    given buffer. It might return -EBUSY to signal that backing storage
 *	    is already allocated and incompatible with the requirements
 *	    of the requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns list of scatter pages allocated, increases usecount
 *		 of the buffer. Requires at least one attach to be called
 *		 before. Returned sg list should already be mapped into
 *		 _device_ address space. This call may sleep. May also return
 *		 -EINTR. Should return -EINVAL if attach hasn't been called yet.
 * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
 *		   pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 *		      caches and allocate backing storage (if not yet done),
 *		      i.e. pin the object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address space;
 *		 users may not block until the subsequent unmap call.
 *		 This callback must not sleep.
 * @kunmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 *		   This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 * @mmap: used to expose the backing storage to userspace. Note that the
 *	  mapping needs to be coherent - if the exporter doesn't directly
 *	  support this, it needs to fake coherency by shooting down any ptes
 *	  when transitioning away from the cpu domain.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer
 */
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
	/* TODO: Add try_map_dma_buf version, to return immed with -EBUSY
	 * if the call would block.
	 */

	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
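/*
 * Example: an illustrative sketch of how an exporter might wire up the
 * callbacks that are not marked [optional] above. The my_buffer type and the
 * my_buffer_* helpers are hypothetical stand-ins for the exporter's own
 * backing-storage management; kmap, kmap_atomic and mmap implementations are
 * omitted for brevity but are likewise expected from a real exporter.
 *
 *	static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
 *					       enum dma_data_direction dir)
 *	{
 *		struct my_buffer *buf = attach->dmabuf->priv;
 *
 *		return my_buffer_map(buf, attach->dev, dir);
 *	}
 *
 *	static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
 *				     struct sg_table *sgt,
 *				     enum dma_data_direction dir)
 *	{
 *		my_buffer_unmap(attach->dmabuf->priv, sgt, dir);
 *	}
 *
 *	static void my_release(struct dma_buf *dmabuf)
 *	{
 *		my_buffer_free(dmabuf->priv);
 *	}
 *
 *	static const struct dma_buf_ops my_dma_buf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 */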
/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across, and for refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: used internally to serialize list manipulation, attach/detach and
 *	  vmap/vunmap.
 * @vmapping_counter: used internally to refcnt the vmaps
 * @vmap_ptr: the current vmap ptr if vmapping_counter > 0
 * @exp_name: name of the exporter; useful for debugging.
 * @owner: pointer to exporter module; used for refcounting when exporter is a
 *	   kernel module.
 * @list_node: node for dma_buf accounting and debugging.
 * @priv: exporter specific private data for this buffer object.
 * @resv: reservation object linked to this dma-buf
 * @poll: for userspace poll support
 * @cb_excl: for userspace poll support
 * @cb_shared: for userspace poll support
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	struct mutex lock;
	unsigned vmapping_counter;
	void *vmap_ptr;
	const char *exp_name;
	struct module *owner;
	struct list_head list_node;
	void *priv;
	struct reservation_object *resv;

	/* poll support */
	wait_queue_head_t poll;

	struct dma_buf_poll_cb_t {
		struct fence_cb cb;
		wait_queue_head_t *poll;

		unsigned long active;
	} cb_excl, cb_shared;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list of dma_buf_attachment.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};

/**
 * struct dma_buf_export_info - holds information needed to export a dma_buf
 * @exp_name: name of the exporter - useful for debugging.
 * @owner: pointer to exporter module - used for refcounting kernel module
 * @ops: Attach allocator-defined dma buf ops to the new buffer
 * @size: Size of the buffer
 * @flags: mode flags for the file
 * @resv: reservation-object, NULL to allocate default one
 * @priv: Attach private data of allocator to this buffer
 *
 * This structure holds the information required to export the buffer. Used
 * with dma_buf_export() only.
 */
struct dma_buf_export_info {
	const char *exp_name;
	struct module *owner;
	const struct dma_buf_ops *ops;
	size_t size;
	int flags;
	struct reservation_object *resv;
	void *priv;
};

/**
 * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters; zeroes the
 * export info and fills in the most common values
 * @name: export-info name
 */
#define DEFINE_DMA_BUF_EXPORT_INFO(name)	\
	struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \
					    .owner = THIS_MODULE }
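/*
 * Example: an illustrative sketch of the export path built on the structures
 * above. The my_buffer type and my_dma_buf_ops table are hypothetical (see the
 * sketch following struct dma_buf_ops); error handling is kept minimal.
 *
 *	static int my_export(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *		int fd;
 *
 *		exp_info.ops = &my_dma_buf_ops;
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_RDWR;
 *		exp_info.priv = buf;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *		if (fd < 0)
 *			dma_buf_put(dmabuf);
 *
 *		return fd;
 *	}
 */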
/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * create additional references to the dmabuf on the kernel side. For example,
 * an exporter that needs to keep a dmabuf ptr so that subsequent exports
 * don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}

struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);

struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);

int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
			     enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
			   enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#endif /* __DMA_BUF_H__ */
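/*
 * Example: an illustrative sketch of the importer side of the interfaces
 * declared above. The my_import() name is hypothetical and my_dev is assumed
 * to be the importing driver's struct device; error unwinding is abbreviated.
 *
 *	static int my_import(struct device *my_dev, int fd)
 *	{
 *		struct dma_buf *dmabuf;
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *
 *		dmabuf = dma_buf_get(fd);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, my_dev);
 *		if (IS_ERR(attach)) {
 *			dma_buf_put(dmabuf);
 *			return PTR_ERR(attach);
 *		}
 *
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *		if (IS_ERR(sgt)) {
 *			dma_buf_detach(dmabuf, attach);
 *			dma_buf_put(dmabuf);
 *			return PTR_ERR(sgt);
 *		}
 *
 *		... program the device using sgt, then tear down ...
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *		dma_buf_detach(dmabuf, attach);
 *		dma_buf_put(dmabuf);
 *		return 0;
 *	}
 */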