/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-resv.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

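/*
 * Allocate a buffer of @size bytes with vmalloc_user() and initialise the
 * refcount and the vm_area handler used later by the mmap() path.
 */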
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

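/*
 * Drop a reference on the buffer; free the vmalloc area and the bookkeeping
 * structure once the last user is gone.
 */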
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

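/*
 * USERPTR support: pin the user pages with vb2_create_framevec(). When
 * struct page pointers are available the pages are mapped into the kernel
 * with vm_map_ram(); otherwise (e.g. PFNMAP memory) the PFNs must be
 * physically contiguous and the range is ioremap()ed instead.
 */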
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

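/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping (vm_unmap_ram()
 * or iounmap()), mark the pages dirty for capture directions and release
 * the pinned frame vector.
 */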
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	return refcount_read(&buf->refcount);
}

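/*
 * Map the vmalloc buffer into userspace with remap_vmalloc_range() and hook
 * up vb2_common_vm_ops so that the mapping holds a reference on the buffer
 * for as long as it exists.
 */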
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags		|= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

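/*
 * Exporter attach: build an sg_table with one entry per page of the vmalloc
 * area (via vmalloc_to_page()). Mapping to the importer's device is deferred
 * to the map_dma_buf callback.
 */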
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

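/*
 * Map the attachment's sg_table for the importing device. The mapping is
 * cached in the attachment and only redone when the DMA direction changes.
 */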
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct iosys_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	dma_resv_assert_held(dbuf->resv);

	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

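/*
 * Export the buffer as a dma-buf. The exported dma-buf holds an extra
 * reference on the vb2 buffer, dropped again in the dma-buf release op.
 */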
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

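/*
 * Importer side: vmap the attached dma-buf into the kernel so that
 * vb2_vmalloc_vaddr() can hand out a kernel virtual address.
 */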
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap_unlocked(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap_unlocked(buf->dbuf, &map);

	kfree(buf);
}

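/*
 * Attach to an imported dma-buf: just record the dma-buf and the requested
 * size; the actual vmap happens in vb2_vmalloc_map_dmabuf().
 */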
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}


const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);