1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * helper functions for physically contiguous capture buffers 4 * 5 * The functions support hardware lacking scatter gather support 6 * (i.e. the buffers must be linear in physical memory) 7 * 8 * Copyright (c) 2008 Magnus Damm 9 * 10 * Based on videobuf-vmalloc.c, 11 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org> 12 */ 13 14 #include <linux/init.h> 15 #include <linux/module.h> 16 #include <linux/mm.h> 17 #include <linux/pagemap.h> 18 #include <linux/dma-mapping.h> 19 #include <linux/sched.h> 20 #include <linux/slab.h> 21 #include <media/videobuf-dma-contig.h> 22 23 struct videobuf_dma_contig_memory { 24 u32 magic; 25 void *vaddr; 26 dma_addr_t dma_handle; 27 unsigned long size; 28 }; 29 30 #define MAGIC_DC_MEM 0x0733ac61 31 #define MAGIC_CHECK(is, should) \ 32 if (unlikely((is) != (should))) { \ 33 pr_err("magic mismatch: %x expected %x\n", (is), (should)); \ 34 BUG(); \ 35 } 36 37 static int __videobuf_dc_alloc(struct device *dev, 38 struct videobuf_dma_contig_memory *mem, 39 unsigned long size, gfp_t flags) 40 { 41 mem->size = size; 42 mem->vaddr = dma_alloc_coherent(dev, mem->size, 43 &mem->dma_handle, flags); 44 45 if (!mem->vaddr) { 46 dev_err(dev, "memory alloc size %ld failed\n", mem->size); 47 return -ENOMEM; 48 } 49 50 dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size); 51 52 return 0; 53 } 54 55 static void __videobuf_dc_free(struct device *dev, 56 struct videobuf_dma_contig_memory *mem) 57 { 58 dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle); 59 60 mem->vaddr = NULL; 61 } 62 63 static void videobuf_vm_open(struct vm_area_struct *vma) 64 { 65 struct videobuf_mapping *map = vma->vm_private_data; 66 67 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", 68 map, map->count, vma->vm_start, vma->vm_end); 69 70 map->count++; 71 } 72 73 static void videobuf_vm_close(struct vm_area_struct *vma) 74 { 75 struct videobuf_mapping *map = vma->vm_private_data; 76 struct videobuf_queue *q = map->q; 77 int i; 78 79 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", 80 map, map->count, vma->vm_start, vma->vm_end); 81 82 map->count--; 83 if (0 == map->count) { 84 struct videobuf_dma_contig_memory *mem; 85 86 dev_dbg(q->dev, "munmap %p q=%p\n", map, q); 87 videobuf_queue_lock(q); 88 89 /* We need first to cancel streams, before unmapping */ 90 if (q->streaming) 91 videobuf_queue_cancel(q); 92 93 for (i = 0; i < VIDEO_MAX_FRAME; i++) { 94 if (NULL == q->bufs[i]) 95 continue; 96 97 if (q->bufs[i]->map != map) 98 continue; 99 100 mem = q->bufs[i]->priv; 101 if (mem) { 102 /* This callback is called only if kernel has 103 allocated memory and this memory is mmapped. 104 In this case, memory should be freed, 105 in order to do memory unmap. 106 */ 107 108 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 109 110 /* vfree is not atomic - can't be 111 called with IRQ's disabled 112 */ 113 dev_dbg(q->dev, "buf[%d] freeing %p\n", 114 i, mem->vaddr); 115 116 __videobuf_dc_free(q->dev, mem); 117 mem->vaddr = NULL; 118 } 119 120 q->bufs[i]->map = NULL; 121 q->bufs[i]->baddr = 0; 122 } 123 124 kfree(map); 125 126 videobuf_queue_unlock(q); 127 } 128 } 129 130 static const struct vm_operations_struct videobuf_vm_ops = { 131 .open = videobuf_vm_open, 132 .close = videobuf_vm_close, 133 }; 134 135 /** 136 * videobuf_dma_contig_user_put() - reset pointer to user space buffer 137 * @mem: per-buffer private videobuf-dma-contig data 138 * 139 * This function resets the user space pointer 140 */ 141 static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem) 142 { 143 mem->dma_handle = 0; 144 mem->size = 0; 145 } 146 147 /** 148 * videobuf_dma_contig_user_get() - setup user space memory pointer 149 * @mem: per-buffer private videobuf-dma-contig data 150 * @vb: video buffer to map 151 * 152 * This function validates and sets up a pointer to user space memory. 153 * Only physically contiguous pfn-mapped memory is accepted. 154 * 155 * Returns 0 if successful. 156 */ 157 static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem, 158 struct videobuf_buffer *vb) 159 { 160 struct mm_struct *mm = current->mm; 161 struct vm_area_struct *vma; 162 unsigned long prev_pfn, this_pfn; 163 unsigned long pages_done, user_address; 164 unsigned int offset; 165 int ret; 166 167 offset = vb->baddr & ~PAGE_MASK; 168 mem->size = PAGE_ALIGN(vb->size + offset); 169 ret = -EINVAL; 170 171 down_read(&mm->mmap_sem); 172 173 vma = find_vma(mm, vb->baddr); 174 if (!vma) 175 goto out_up; 176 177 if ((vb->baddr + mem->size) > vma->vm_end) 178 goto out_up; 179 180 pages_done = 0; 181 prev_pfn = 0; /* kill warning */ 182 user_address = vb->baddr; 183 184 while (pages_done < (mem->size >> PAGE_SHIFT)) { 185 ret = follow_pfn(vma, user_address, &this_pfn); 186 if (ret) 187 break; 188 189 if (pages_done == 0) 190 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset; 191 else if (this_pfn != (prev_pfn + 1)) 192 ret = -EFAULT; 193 194 if (ret) 195 break; 196 197 prev_pfn = this_pfn; 198 user_address += PAGE_SIZE; 199 pages_done++; 200 } 201 202 out_up: 203 up_read(¤t->mm->mmap_sem); 204 205 return ret; 206 } 207 208 static struct videobuf_buffer *__videobuf_alloc(size_t size) 209 { 210 struct videobuf_dma_contig_memory *mem; 211 struct videobuf_buffer *vb; 212 213 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL); 214 if (vb) { 215 vb->priv = ((char *)vb) + size; 216 mem = vb->priv; 217 mem->magic = MAGIC_DC_MEM; 218 } 219 220 return vb; 221 } 222 223 static void *__videobuf_to_vaddr(struct videobuf_buffer *buf) 224 { 225 struct videobuf_dma_contig_memory *mem = buf->priv; 226 227 BUG_ON(!mem); 228 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 229 230 return mem->vaddr; 231 } 232 233 static int __videobuf_iolock(struct videobuf_queue *q, 234 struct videobuf_buffer *vb, 235 struct v4l2_framebuffer *fbuf) 236 { 237 struct videobuf_dma_contig_memory *mem = vb->priv; 238 239 BUG_ON(!mem); 240 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 241 242 switch (vb->memory) { 243 case V4L2_MEMORY_MMAP: 244 dev_dbg(q->dev, "%s memory method MMAP\n", __func__); 245 246 /* All handling should be done by __videobuf_mmap_mapper() */ 247 if (!mem->vaddr) { 248 dev_err(q->dev, "memory is not allocated/mmapped.\n"); 249 return -EINVAL; 250 } 251 break; 252 case V4L2_MEMORY_USERPTR: 253 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__); 254 255 /* handle pointer from user space */ 256 if (vb->baddr) 257 return videobuf_dma_contig_user_get(mem, vb); 258 259 /* allocate memory for the read() method */ 260 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size), 261 GFP_KERNEL)) 262 return -ENOMEM; 263 break; 264 case V4L2_MEMORY_OVERLAY: 265 default: 266 dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__); 267 return -EINVAL; 268 } 269 270 return 0; 271 } 272 273 static int __videobuf_mmap_mapper(struct videobuf_queue *q, 274 struct videobuf_buffer *buf, 275 struct vm_area_struct *vma) 276 { 277 struct videobuf_dma_contig_memory *mem; 278 struct videobuf_mapping *map; 279 int retval; 280 unsigned long size; 281 282 dev_dbg(q->dev, "%s\n", __func__); 283 284 /* create mapping + update buffer list */ 285 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); 286 if (!map) 287 return -ENOMEM; 288 289 buf->map = map; 290 map->q = q; 291 292 buf->baddr = vma->vm_start; 293 294 mem = buf->priv; 295 BUG_ON(!mem); 296 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 297 298 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize), 299 GFP_KERNEL | __GFP_COMP)) 300 goto error; 301 302 /* Try to remap memory */ 303 size = vma->vm_end - vma->vm_start; 304 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 305 306 /* the "vm_pgoff" is just used in v4l2 to find the 307 * corresponding buffer data structure which is allocated 308 * earlier and it does not mean the offset from the physical 309 * buffer start address as usual. So set it to 0 to pass 310 * the sanity check in vm_iomap_memory(). 311 */ 312 vma->vm_pgoff = 0; 313 314 retval = vm_iomap_memory(vma, mem->dma_handle, size); 315 if (retval) { 316 dev_err(q->dev, "mmap: remap failed with error %d. ", 317 retval); 318 dma_free_coherent(q->dev, mem->size, 319 mem->vaddr, mem->dma_handle); 320 goto error; 321 } 322 323 vma->vm_ops = &videobuf_vm_ops; 324 vma->vm_flags |= VM_DONTEXPAND; 325 vma->vm_private_data = map; 326 327 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n", 328 map, q, vma->vm_start, vma->vm_end, 329 (long int)buf->bsize, vma->vm_pgoff, buf->i); 330 331 videobuf_vm_open(vma); 332 333 return 0; 334 335 error: 336 kfree(map); 337 return -ENOMEM; 338 } 339 340 static struct videobuf_qtype_ops qops = { 341 .magic = MAGIC_QTYPE_OPS, 342 .alloc_vb = __videobuf_alloc, 343 .iolock = __videobuf_iolock, 344 .mmap_mapper = __videobuf_mmap_mapper, 345 .vaddr = __videobuf_to_vaddr, 346 }; 347 348 void videobuf_queue_dma_contig_init(struct videobuf_queue *q, 349 const struct videobuf_queue_ops *ops, 350 struct device *dev, 351 spinlock_t *irqlock, 352 enum v4l2_buf_type type, 353 enum v4l2_field field, 354 unsigned int msize, 355 void *priv, 356 struct mutex *ext_lock) 357 { 358 videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize, 359 priv, &qops, ext_lock); 360 } 361 EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init); 362 363 dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf) 364 { 365 struct videobuf_dma_contig_memory *mem = buf->priv; 366 367 BUG_ON(!mem); 368 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 369 370 return mem->dma_handle; 371 } 372 EXPORT_SYMBOL_GPL(videobuf_to_dma_contig); 373 374 void videobuf_dma_contig_free(struct videobuf_queue *q, 375 struct videobuf_buffer *buf) 376 { 377 struct videobuf_dma_contig_memory *mem = buf->priv; 378 379 /* mmapped memory can't be freed here, otherwise mmapped region 380 would be released, while still needed. In this case, the memory 381 release should happen inside videobuf_vm_close(). 382 So, it should free memory only if the memory were allocated for 383 read() operation. 384 */ 385 if (buf->memory != V4L2_MEMORY_USERPTR) 386 return; 387 388 if (!mem) 389 return; 390 391 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM); 392 393 /* handle user space pointer case */ 394 if (buf->baddr) { 395 videobuf_dma_contig_user_put(mem); 396 return; 397 } 398 399 /* read() method */ 400 if (mem->vaddr) { 401 __videobuf_dc_free(q->dev, mem); 402 mem->vaddr = NULL; 403 } 404 } 405 EXPORT_SYMBOL_GPL(videobuf_dma_contig_free); 406 407 MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers"); 408 MODULE_AUTHOR("Magnus Damm"); 409 MODULE_LICENSE("GPL"); 410