/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

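/*
 * Per-buffer state for this allocator (fields as used below):
 * @vaddr:	kernel virtual address of the buffer: a vmalloc area,
 *		pinned user pages mapped with vm_map_ram(), an ioremap()
 *		of a contiguous PFN range, or a vmap of an imported dma-buf
 * @vec:	pinned user pages backing a USERPTR buffer
 * @dma_dir:	DMA direction of the owning queue
 * @size:	buffer size in bytes
 * @refcount:	references held by the driver, userspace mmap()s and
 *		exported dma-bufs
 * @handler:	common vm_operations helper that drops @refcount on munmap()
 * @dbuf:	dma-buf this buffer was imported from, if any
 */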
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %lu failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}
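
/*
 * A driver reaches the mapping created above through the core helpers
 * rather than this file directly. A minimal sketch (vb and plane 0 are
 * assumed to come from the driver's buf_queue callback):
 *
 *	void *p = vb2_plane_vaddr(vb, 0);
 *
 *	if (p)
 *		memset(p, 0, vb2_plane_size(vb, 0));
 *
 * vb2_plane_vaddr() ends up in vb2_vmalloc_vaddr() further down.
 */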

static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size,
				  buf->dma_dir == DMA_FROM_DEVICE ||
				  buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
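
/*
 * Userspace side of the USERPTR path above, as a hedged sketch (not part
 * of this file): the application supplies its own memory, e.g. from
 * malloc(), and vb2 pins it through vb2_vmalloc_get_userptr() at QBUF
 * time:
 *
 *	struct v4l2_buffer b = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_USERPTR,
 *		.index = 0,
 *		.m.userptr = (unsigned long)user_mem,
 *		.length = len,
 *	};
 *
 *	ioctl(fd, VIDIOC_QBUF, &b);
 */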

static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vm_flags_set(vma, VM_DONTEXPAND);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
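
/*
 * Note on lifetime: the explicit vm_ops->open() call above takes a
 * reference through vb2_common_vm_ops, so the buffer stays alive as long
 * as any userspace mapping exists; the matching close() drops it via
 * vb2_vmalloc_put() through buf->handler.
 */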

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}
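
/*
 * The attachment built above holds one scatterlist entry per vmalloc page
 * and is cached for the lifetime of the attachment; the actual DMA mapping
 * (and any remapping on a direction change) happens lazily in
 * vb2_vmalloc_dmabuf_ops_map() below.
 */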

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct iosys_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */
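
/*
 * Userspace trigger for the export path, as a hedged sketch (not part of
 * this file): VIDIOC_EXPBUF makes vb2 call vb2_vmalloc_get_dmabuf() and,
 * on success, eb.fd holds the new dma-buf file descriptor:
 *
 *	struct v4l2_exportbuffer eb = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.index = 0,
 *		.flags = O_CLOEXEC | O_RDWR,
 *	};
 *
 *	ioctl(fd, VIDIOC_EXPBUF, &eb);
 */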

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap_unlocked(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap_unlocked(buf->dbuf, &map);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}
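
/*
 * attach_dmabuf only records the dma-buf and checks its size; no vmap is
 * taken here. vb2_vmalloc_map_dmabuf() above creates the kernel mapping
 * when the core needs the buffer mapped, and detach_dmabuf undoes a still
 * live mapping before freeing the bookkeeping.
 */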

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
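
/*
 * How a driver selects this allocator, as a minimal sketch (dev->queue is
 * a hypothetical driver field; other required queue fields -- ops,
 * drv_priv, buf_struct_size, lock -- are assumed to be set elsewhere):
 *
 *	struct vb2_queue *q = &dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */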

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);