/*
 * Header file for dma buffer sharing framework.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__

#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>

struct device;
struct dma_buf;
struct dma_buf_attachment;

/**
 * struct dma_buf_ops - operations possible on struct dma_buf
 * @attach: [optional] allows different devices to 'attach' themselves to the
 *	    given buffer. It might return -EBUSY to signal that backing storage
 *	    is already allocated and incompatible with the requirements of the
 *	    requesting device.
 * @detach: [optional] detach a given device from this buffer.
 * @map_dma_buf: returns the list of scatter pages allocated, and increases
 *		 the usecount of the buffer. Requires at least one attach to
 *		 have been called before. The returned sg list should already
 *		 be mapped into _device_ address space. This call may sleep.
 *		 May also return -EINTR. Should return -EINVAL if attach
 *		 hasn't been called yet.
 * @unmap_dma_buf: decreases the usecount of the buffer, and might deallocate
 *		   the scatter pages.
 * @release: release this buffer; to be called after the last dma_buf_put.
 * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
 *		      caches and allocate backing storage (if not yet done),
 *		      or to pin the object into memory.
 * @end_cpu_access: [optional] called after cpu access to flush caches.
 * @kmap_atomic: maps a page from the buffer into kernel address space;
 *		 users may not block between the map and the subsequent
 *		 unmap call. This callback must not sleep.
 * @kunmap_atomic: [optional] unmaps an atomically mapped page from the
 *		   buffer. This callback must not sleep.
 * @kmap: maps a page from the buffer into kernel address space.
 * @kunmap: [optional] unmaps a page from the buffer.
 * @mmap: used to expose the backing storage to userspace. Note that the
 *	  mapping needs to be coherent - if the exporter doesn't directly
 *	  support this, it needs to fake coherency by shooting down any ptes
 *	  when transitioning away from the cpu domain.
 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
 *	  address space. Same restrictions as for vmap and friends apply.
 * @vunmap: [optional] unmaps a vmap from the buffer.
 */
struct dma_buf_ops {
	int (*attach)(struct dma_buf *, struct device *,
		      struct dma_buf_attachment *);

	void (*detach)(struct dma_buf *, struct dma_buf_attachment *);

	/* For {map,unmap}_dma_buf below, any specific buffer attributes
	 * required should get added to device_dma_parameters accessible
	 * via dev->dma_params.
	 */
	struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *,
					 enum dma_data_direction);
	void (*unmap_dma_buf)(struct dma_buf_attachment *,
			      struct sg_table *,
			      enum dma_data_direction);
	/* TODO: Add try_map_dma_buf version, to return immediately with
	 * -EBUSY if the call would block.
	 */

	/* after final dma_buf_put() */
	void (*release)(struct dma_buf *);

	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
				enum dma_data_direction);
	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
			       enum dma_data_direction);
	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
	void *(*kmap)(struct dma_buf *, unsigned long);
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
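
/*
 * Example: a minimal exporter-side sketch of the mandatory map/unmap and
 * release callbacks. This block is illustrative only and deliberately
 * compiled out; my_buffer, my_buffer_free and my_dma_buf_ops are
 * hypothetical names, the backing storage is assumed to already be
 * described by an sg_table, and a real exporter would typically keep a
 * separate sg_table per attachment and add locking and error handling.
 */
#if 0
struct my_buffer {
	struct sg_table sgt;		/* backing pages of the buffer */
};

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct my_buffer *buf = attach->dmabuf->priv;
	int nents;

	/* map the backing pages into the attached device's address space */
	nents = dma_map_sg(attach->dev, buf->sgt.sgl, buf->sgt.orig_nents, dir);
	if (!nents)
		return ERR_PTR(-ENOMEM);
	buf->sgt.nents = nents;
	return &buf->sgt;
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
}

static void my_release(struct dma_buf *dmabuf)
{
	/* last reference is gone: free backing pages and bookkeeping */
	my_buffer_free(dmabuf->priv);
}

static const struct dma_buf_ops my_dma_buf_ops = {
	.map_dma_buf	= my_map_dma_buf,
	.unmap_dma_buf	= my_unmap_dma_buf,
	.release	= my_release,
};
#endif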

/**
 * struct dma_buf - shared buffer object
 * @size: size of the buffer
 * @file: file pointer used for sharing buffers across processes, and for
 *	  refcounting.
 * @attachments: list of dma_buf_attachment that denotes all devices attached.
 * @ops: dma_buf_ops associated with this buffer object.
 * @lock: mutex to serialize list manipulation and attach/detach.
 * @priv: exporter specific private data for this buffer object.
 */
struct dma_buf {
	size_t size;
	struct file *file;
	struct list_head attachments;
	const struct dma_buf_ops *ops;
	/* mutex to serialize list manipulation and attach/detach */
	struct mutex lock;
	void *priv;
};

/**
 * struct dma_buf_attachment - holds device-buffer attachment data
 * @dmabuf: buffer for this attachment.
 * @dev: device attached to the buffer.
 * @node: list node in dma_buf.attachments.
 * @priv: exporter specific attachment data.
 *
 * This structure holds the attachment information between the dma_buf buffer
 * and its user device(s). The list contains one attachment struct per device
 * attached to the buffer.
 */
struct dma_buf_attachment {
	struct dma_buf *dmabuf;
	struct device *dev;
	struct list_head node;
	void *priv;
};

/**
 * get_dma_buf - convenience wrapper for get_file.
 * @dmabuf: [in] pointer to dma_buf
 *
 * Increments the reference count on the dma-buf, needed by drivers that
 * want to create additional references to the dmabuf on the kernel side.
 * For example, an exporter that needs to keep a dmabuf pointer around so
 * that subsequent exports don't create a new dmabuf.
 */
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
	get_file(dmabuf->file);
}
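
/*
 * Example: a sketch of the caching pattern described above, reusing
 * my_dma_buf_ops from the sketch following struct dma_buf_ops. my_dev and
 * its fields are hypothetical, and checking of the ERR_PTR return from
 * dma_buf_export() is elided for brevity.
 */
#if 0
static struct dma_buf *my_export(struct my_dev *mydev)
{
	if (mydev->cached_dmabuf) {
		/* hand out the same dma_buf again with an extra reference */
		get_dma_buf(mydev->cached_dmabuf);
		return mydev->cached_dmabuf;
	}
	mydev->cached_dmabuf = dma_buf_export(mydev->buf, &my_dma_buf_ops,
					      mydev->buf_size, O_CLOEXEC);
	return mydev->cached_dmabuf;
}
#endif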

#ifdef CONFIG_DMA_SHARED_BUFFER
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev);
void dma_buf_detach(struct dma_buf *dmabuf,
		    struct dma_buf_attachment *dmabuf_attach);
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
			       size_t size, int flags);
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);

struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
					enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
			      enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			     enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
			    enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);

int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#else

static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
							struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_detach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *dmabuf_attach)
{
}

static inline struct dma_buf *dma_buf_export(void *priv,
					     const struct dma_buf_ops *ops,
					     size_t size, int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	return -ENODEV;
}

static inline struct dma_buf *dma_buf_get(int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_put(struct dma_buf *dmabuf)
{
}

static inline struct sg_table *dma_buf_map_attachment(
	struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	return ERR_PTR(-ENODEV);
}

static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
					    struct sg_table *sg,
					    enum dma_data_direction dir)
{
}

static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					   size_t start, size_t len,
					   enum dma_data_direction dir)
{
	return -ENODEV;
}

static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					  size_t start, size_t len,
					  enum dma_data_direction dir)
{
}

static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
					unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
					 unsigned long pnum, void *vaddr)
{
}

static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
{
	return NULL;
}

static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
				  unsigned long pnum, void *vaddr)
{
}

static inline int dma_buf_mmap(struct dma_buf *dmabuf,
			       struct vm_area_struct *vma,
			       unsigned long pgoff)
{
	return -ENODEV;
}

static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	return NULL;
}

static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
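
/*
 * Example: a sketch of the importer-side call sequence, assuming a
 * hypothetical driver that received a dma-buf file descriptor "fd" from
 * userspace. Error handling is elided for brevity; in real code each
 * ERR_PTR return below must be checked.
 */
#if 0
static int my_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	attach = dma_buf_attach(dmabuf, dev);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

	/* ... program the device with the dma addresses in sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);			/* drops the file reference */
	return 0;
}
#endif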

#endif /* __DMA_BUF_H__ */