// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-resv.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>


struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	bool mapped;
};

static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

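/*
 * CPU access to a heap buffer is expected to be bracketed by the
 * begin_cpu_access/end_cpu_access ops below. From userspace this is
 * normally driven via DMA_BUF_IOCTL_SYNC on the exported buffer fd
 * (buf_fd in this rough, illustrative sketch, which is not part of
 * this module):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
 *	};
 *
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU reads/writes through the mmap()ed buffer ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */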
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	mutex_lock(&buffer->lock);

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	dma_resv_assert_held(dmabuf->resv);

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	/* Reuse and refcount an existing kernel mapping if we have one */
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		iosys_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	iosys_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	iosys_map_clear(map);
}

static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free page list */
	kfree(buffer->pages);
	/* release memory */
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

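/*
 * Buffer allocation: reserve a physically contiguous run of pages from
 * the backing CMA area, zero it, build the page array used by the
 * attach/mmap/vmap paths above, and export the result as a new dma-buf.
 */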
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}
	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
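
/*
 * Usage note (illustrative userspace sketch, not part of this module):
 * once registered, the heap appears under /dev/dma_heap/ with the name
 * of the backing CMA area ("reserved" below is only an example of such
 * a name), and buffers are allocated through DMA_HEAP_IOCTL_ALLOC:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-heap.h>
 *
 *	int alloc_from_cma_heap(size_t len)
 *	{
 *		struct dma_heap_allocation_data data = {
 *			.len = len,
 *			.fd_flags = O_RDWR | O_CLOEXEC,
 *		};
 *		int heap_fd = open("/dev/dma_heap/reserved", O_RDONLY);
 *
 *		if (heap_fd < 0)
 *			return -1;
 *		if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
 *			close(heap_fd);
 *			return -1;
 *		}
 *		close(heap_fd);
 *		return data.fd;	// dma-buf fd backed by this heap
 *	}
 */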