1fbb0de79SGerd Hoffmann // SPDX-License-Identifier: GPL-2.0
2913965c4SGerd Hoffmann #include <linux/cred.h>
3fbb0de79SGerd Hoffmann #include <linux/device.h>
4fbb0de79SGerd Hoffmann #include <linux/dma-buf.h>
5aa3f9989SDmitry Osipenko #include <linux/dma-resv.h>
6fbb0de79SGerd Hoffmann #include <linux/highmem.h>
7913965c4SGerd Hoffmann #include <linux/init.h>
8913965c4SGerd Hoffmann #include <linux/kernel.h>
9fbb0de79SGerd Hoffmann #include <linux/memfd.h>
10913965c4SGerd Hoffmann #include <linux/miscdevice.h>
11913965c4SGerd Hoffmann #include <linux/module.h>
12913965c4SGerd Hoffmann #include <linux/shmem_fs.h>
13913965c4SGerd Hoffmann #include <linux/slab.h>
14913965c4SGerd Hoffmann #include <linux/udmabuf.h>
157ae2e684SLukasz Wiecaszek #include <linux/vmalloc.h>
167ae2e684SLukasz Wiecaszek #include <linux/iosys-map.h>
17fbb0de79SGerd Hoffmann
/* Upper bound on udmabuf_create_list.count accepted by UDMABUF_CREATE_LIST. */
static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");

/* Upper bound, in MiB, on the total size of a single exported dma-buf. */
static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
25dc4716d7SGerd Hoffmann
/* Per-buffer state for a dma-buf exported from memfd-backed pages. */
struct udmabuf {
	pgoff_t pagecount;		/* number of entries in @pages */
	struct page **pages;		/* pinned shmem pages backing the buffer */
	struct sg_table *sg;		/* mapping cached by begin_cpu_udmabuf(), NULL until then */
	struct miscdevice *device;	/* owning misc device; its this_device is used for DMA ops */
};
32fbb0de79SGerd Hoffmann
udmabuf_vm_fault(struct vm_fault * vmf)33300133d3SSouptick Joarder static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
34fbb0de79SGerd Hoffmann {
35fbb0de79SGerd Hoffmann struct vm_area_struct *vma = vmf->vma;
36fbb0de79SGerd Hoffmann struct udmabuf *ubuf = vma->vm_private_data;
3705b252ccSGerd Hoffmann pgoff_t pgoff = vmf->pgoff;
38ac48ddf6SVivek Kasireddy unsigned long pfn;
39fbb0de79SGerd Hoffmann
4005b252ccSGerd Hoffmann if (pgoff >= ubuf->pagecount)
4105b252ccSGerd Hoffmann return VM_FAULT_SIGBUS;
42ac48ddf6SVivek Kasireddy
43ac48ddf6SVivek Kasireddy pfn = page_to_pfn(ubuf->pages[pgoff]);
44ac48ddf6SVivek Kasireddy return vmf_insert_pfn(vma, vmf->address, pfn);
45fbb0de79SGerd Hoffmann }
46fbb0de79SGerd Hoffmann
/* VMA callbacks for userspace mmap()s of the exported dma-buf. */
static const struct vm_operations_struct udmabuf_vm_ops = {
	.fault = udmabuf_vm_fault,
};
50fbb0de79SGerd Hoffmann
mmap_udmabuf(struct dma_buf * buf,struct vm_area_struct * vma)51fbb0de79SGerd Hoffmann static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
52fbb0de79SGerd Hoffmann {
53fbb0de79SGerd Hoffmann struct udmabuf *ubuf = buf->priv;
54fbb0de79SGerd Hoffmann
55fbb0de79SGerd Hoffmann if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
56fbb0de79SGerd Hoffmann return -EINVAL;
57fbb0de79SGerd Hoffmann
58fbb0de79SGerd Hoffmann vma->vm_ops = &udmabuf_vm_ops;
59fbb0de79SGerd Hoffmann vma->vm_private_data = ubuf;
60ac48ddf6SVivek Kasireddy vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
61fbb0de79SGerd Hoffmann return 0;
62fbb0de79SGerd Hoffmann }
63fbb0de79SGerd Hoffmann
vmap_udmabuf(struct dma_buf * buf,struct iosys_map * map)647ae2e684SLukasz Wiecaszek static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
657ae2e684SLukasz Wiecaszek {
667ae2e684SLukasz Wiecaszek struct udmabuf *ubuf = buf->priv;
677ae2e684SLukasz Wiecaszek void *vaddr;
687ae2e684SLukasz Wiecaszek
697ae2e684SLukasz Wiecaszek dma_resv_assert_held(buf->resv);
707ae2e684SLukasz Wiecaszek
717ae2e684SLukasz Wiecaszek vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
727ae2e684SLukasz Wiecaszek if (!vaddr)
737ae2e684SLukasz Wiecaszek return -EINVAL;
747ae2e684SLukasz Wiecaszek
757ae2e684SLukasz Wiecaszek iosys_map_set_vaddr(map, vaddr);
767ae2e684SLukasz Wiecaszek return 0;
777ae2e684SLukasz Wiecaszek }
787ae2e684SLukasz Wiecaszek
vunmap_udmabuf(struct dma_buf * buf,struct iosys_map * map)797ae2e684SLukasz Wiecaszek static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
807ae2e684SLukasz Wiecaszek {
817ae2e684SLukasz Wiecaszek struct udmabuf *ubuf = buf->priv;
827ae2e684SLukasz Wiecaszek
837ae2e684SLukasz Wiecaszek dma_resv_assert_held(buf->resv);
847ae2e684SLukasz Wiecaszek
857ae2e684SLukasz Wiecaszek vm_unmap_ram(map->vaddr, ubuf->pagecount);
867ae2e684SLukasz Wiecaszek }
877ae2e684SLukasz Wiecaszek
get_sg_table(struct device * dev,struct dma_buf * buf,enum dma_data_direction direction)8817a7ce20SGurchetan Singh static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
89fbb0de79SGerd Hoffmann enum dma_data_direction direction)
90fbb0de79SGerd Hoffmann {
9117a7ce20SGurchetan Singh struct udmabuf *ubuf = buf->priv;
92fbb0de79SGerd Hoffmann struct sg_table *sg;
93a3e722daSGerd Hoffmann int ret;
94fbb0de79SGerd Hoffmann
95fbb0de79SGerd Hoffmann sg = kzalloc(sizeof(*sg), GFP_KERNEL);
96fbb0de79SGerd Hoffmann if (!sg)
97a3e722daSGerd Hoffmann return ERR_PTR(-ENOMEM);
98a3e722daSGerd Hoffmann ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
99fbb0de79SGerd Hoffmann 0, ubuf->pagecount << PAGE_SHIFT,
100a3e722daSGerd Hoffmann GFP_KERNEL);
101a3e722daSGerd Hoffmann if (ret < 0)
102a3e722daSGerd Hoffmann goto err;
10362296b39SMarek Szyprowski ret = dma_map_sgtable(dev, sg, direction, 0);
10462296b39SMarek Szyprowski if (ret < 0)
105a3e722daSGerd Hoffmann goto err;
106fbb0de79SGerd Hoffmann return sg;
107fbb0de79SGerd Hoffmann
108a3e722daSGerd Hoffmann err:
109fbb0de79SGerd Hoffmann sg_free_table(sg);
110fbb0de79SGerd Hoffmann kfree(sg);
111a3e722daSGerd Hoffmann return ERR_PTR(ret);
112fbb0de79SGerd Hoffmann }
113fbb0de79SGerd Hoffmann
/*
 * Release a table produced by get_sg_table(): unmap the DMA addresses
 * from @dev, free the scatterlist entries, then the table itself.
 */
static void put_sg_table(struct device *dev, struct sg_table *sg,
			 enum dma_data_direction direction)
{
	dma_unmap_sgtable(dev, sg, direction, 0);
	sg_free_table(sg);
	kfree(sg);
}
12117a7ce20SGurchetan Singh
/* dma-buf map_dma_buf: build and DMA-map a scatterlist for this attachment. */
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
				    enum dma_data_direction direction)
{
	return get_sg_table(at->dev, at->dmabuf, direction);
}
12717a7ce20SGurchetan Singh
unmap_udmabuf(struct dma_buf_attachment * at,struct sg_table * sg,enum dma_data_direction direction)128fbb0de79SGerd Hoffmann static void unmap_udmabuf(struct dma_buf_attachment *at,
129fbb0de79SGerd Hoffmann struct sg_table *sg,
130fbb0de79SGerd Hoffmann enum dma_data_direction direction)
131fbb0de79SGerd Hoffmann {
13217a7ce20SGurchetan Singh return put_sg_table(at->dev, sg, direction);
133fbb0de79SGerd Hoffmann }
134fbb0de79SGerd Hoffmann
release_udmabuf(struct dma_buf * buf)135fbb0de79SGerd Hoffmann static void release_udmabuf(struct dma_buf *buf)
136fbb0de79SGerd Hoffmann {
137fbb0de79SGerd Hoffmann struct udmabuf *ubuf = buf->priv;
138284562e1SGurchetan Singh struct device *dev = ubuf->device->this_device;
139fbb0de79SGerd Hoffmann pgoff_t pg;
140fbb0de79SGerd Hoffmann
141284562e1SGurchetan Singh if (ubuf->sg)
142284562e1SGurchetan Singh put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
143284562e1SGurchetan Singh
144fbb0de79SGerd Hoffmann for (pg = 0; pg < ubuf->pagecount; pg++)
145fbb0de79SGerd Hoffmann put_page(ubuf->pages[pg]);
146fbb0de79SGerd Hoffmann kfree(ubuf->pages);
147fbb0de79SGerd Hoffmann kfree(ubuf);
148fbb0de79SGerd Hoffmann }
149fbb0de79SGerd Hoffmann
begin_cpu_udmabuf(struct dma_buf * buf,enum dma_data_direction direction)150284562e1SGurchetan Singh static int begin_cpu_udmabuf(struct dma_buf *buf,
151284562e1SGurchetan Singh enum dma_data_direction direction)
152284562e1SGurchetan Singh {
153284562e1SGurchetan Singh struct udmabuf *ubuf = buf->priv;
154284562e1SGurchetan Singh struct device *dev = ubuf->device->this_device;
155d9c04a1bSVivek Kasireddy int ret = 0;
156284562e1SGurchetan Singh
157284562e1SGurchetan Singh if (!ubuf->sg) {
158284562e1SGurchetan Singh ubuf->sg = get_sg_table(dev, buf, direction);
159d9c04a1bSVivek Kasireddy if (IS_ERR(ubuf->sg)) {
160d9c04a1bSVivek Kasireddy ret = PTR_ERR(ubuf->sg);
161d9c04a1bSVivek Kasireddy ubuf->sg = NULL;
162d9c04a1bSVivek Kasireddy }
163284562e1SGurchetan Singh } else {
1641ffe0959SGurchetan Singh dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
165284562e1SGurchetan Singh direction);
166284562e1SGurchetan Singh }
167284562e1SGurchetan Singh
168d9c04a1bSVivek Kasireddy return ret;
169284562e1SGurchetan Singh }
170284562e1SGurchetan Singh
end_cpu_udmabuf(struct dma_buf * buf,enum dma_data_direction direction)171284562e1SGurchetan Singh static int end_cpu_udmabuf(struct dma_buf *buf,
172284562e1SGurchetan Singh enum dma_data_direction direction)
173284562e1SGurchetan Singh {
174284562e1SGurchetan Singh struct udmabuf *ubuf = buf->priv;
175284562e1SGurchetan Singh struct device *dev = ubuf->device->this_device;
176284562e1SGurchetan Singh
177284562e1SGurchetan Singh if (!ubuf->sg)
178284562e1SGurchetan Singh return -EINVAL;
179284562e1SGurchetan Singh
1801ffe0959SGurchetan Singh dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
181284562e1SGurchetan Singh return 0;
182284562e1SGurchetan Singh }
183284562e1SGurchetan Singh
/*
 * Exported dma-buf operations.  cache_sgt_mapping lets the dma-buf core
 * cache one mapping per attachment instead of remapping on every call.
 */
static const struct dma_buf_ops udmabuf_ops = {
	.cache_sgt_mapping = true,
	.map_dma_buf = map_udmabuf,
	.unmap_dma_buf = unmap_udmabuf,
	.release = release_udmabuf,
	.mmap = mmap_udmabuf,
	.vmap = vmap_udmabuf,
	.vunmap = vunmap_udmabuf,
	.begin_cpu_access = begin_cpu_udmabuf,
	.end_cpu_access = end_cpu_udmabuf,
};
195fbb0de79SGerd Hoffmann
/*
 * Seal policy for a memfd passed to UDMABUF_CREATE*: it must be sealed
 * against shrinking (so the pinned pages stay valid) and must NOT carry
 * any write seal (the buffer is exported read-write).
 */
#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)
198fbb0de79SGerd Hoffmann
/*
 * Core creation path shared by UDMABUF_CREATE and UDMABUF_CREATE_LIST.
 *
 * Pins the requested page ranges of each memfd and exports them as one
 * dma-buf.  Returns a new dma-buf file descriptor on success or a
 * negative errno; on failure every page pinned so far is released.
 */
static long udmabuf_create(struct miscdevice *device,
			   struct udmabuf_create_list *head,
			   struct udmabuf_create_item *list)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct file *memfd = NULL;
	struct address_space *mapping = NULL;
	struct udmabuf *ubuf;
	struct dma_buf *buf;
	pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
	struct page *page;
	int seals, ret = -EINVAL;
	u32 i, flags;

	ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
	if (!ubuf)
		return -ENOMEM;

	/* First pass: validate alignment and accumulate the total page count. */
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
			goto err;
		if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
			goto err;
		ubuf->pagecount += list[i].size >> PAGE_SHIFT;
		/* Checked after every addition so the running total never runs away. */
		if (ubuf->pagecount > pglimit)
			goto err;
	}

	/* Zero-sized buffers are rejected. */
	if (!ubuf->pagecount)
		goto err;

	ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
				    GFP_KERNEL);
	if (!ubuf->pages) {
		ret = -ENOMEM;
		goto err;
	}

	/* Second pass: validate each memfd and pin its pages. */
	pgbuf = 0;
	for (i = 0; i < head->count; i++) {
		ret = -EBADFD;
		memfd = fget(list[i].memfd);
		if (!memfd)
			goto err;
		/* Only shmem-backed files (memfd_create) are accepted. */
		mapping = memfd->f_mapping;
		if (!shmem_mapping(mapping))
			goto err;
		seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
		if (seals == -EINVAL)
			goto err;
		ret = -EINVAL;
		/* Enforce the SEALS_WANTED / SEALS_DENIED policy (see above). */
		if ((seals & SEALS_WANTED) != SEALS_WANTED ||
		    (seals & SEALS_DENIED) != 0)
			goto err;
		pgoff = list[i].offset >> PAGE_SHIFT;
		pgcnt = list[i].size >> PAGE_SHIFT;
		for (pgidx = 0; pgidx < pgcnt; pgidx++) {
			/* Takes a page reference; dropped in release_udmabuf()
			 * or in the err path below. */
			page = shmem_read_mapping_page(mapping, pgoff + pgidx);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err;
			}
			ubuf->pages[pgbuf++] = page;
		}
		fput(memfd);
		memfd = NULL;
	}

	exp_info.ops = &udmabuf_ops;
	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
	exp_info.priv = ubuf;
	exp_info.flags = O_RDWR;

	ubuf->device = device;
	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto err;
	}

	flags = 0;
	if (head->flags & UDMABUF_FLAGS_CLOEXEC)
		flags |= O_CLOEXEC;
	/* From here the dma-buf owns ubuf; its release callback frees it. */
	return dma_buf_fd(buf, flags);

err:
	/* Drop every page pinned so far, then the bookkeeping itself. */
	while (pgbuf > 0)
		put_page(ubuf->pages[--pgbuf]);
	if (memfd)
		fput(memfd);
	kfree(ubuf->pages);
	kfree(ubuf);
	return ret;
}
294fbb0de79SGerd Hoffmann
udmabuf_ioctl_create(struct file * filp,unsigned long arg)295fbb0de79SGerd Hoffmann static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
296fbb0de79SGerd Hoffmann {
297fbb0de79SGerd Hoffmann struct udmabuf_create create;
298fbb0de79SGerd Hoffmann struct udmabuf_create_list head;
299fbb0de79SGerd Hoffmann struct udmabuf_create_item list;
300fbb0de79SGerd Hoffmann
301fbb0de79SGerd Hoffmann if (copy_from_user(&create, (void __user *)arg,
30233f35429SGerd Hoffmann sizeof(create)))
303fbb0de79SGerd Hoffmann return -EFAULT;
304fbb0de79SGerd Hoffmann
305fbb0de79SGerd Hoffmann head.flags = create.flags;
306fbb0de79SGerd Hoffmann head.count = 1;
307fbb0de79SGerd Hoffmann list.memfd = create.memfd;
308fbb0de79SGerd Hoffmann list.offset = create.offset;
309fbb0de79SGerd Hoffmann list.size = create.size;
310fbb0de79SGerd Hoffmann
311c1bbed66SGurchetan Singh return udmabuf_create(filp->private_data, &head, &list);
312fbb0de79SGerd Hoffmann }
313fbb0de79SGerd Hoffmann
udmabuf_ioctl_create_list(struct file * filp,unsigned long arg)314fbb0de79SGerd Hoffmann static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
315fbb0de79SGerd Hoffmann {
316fbb0de79SGerd Hoffmann struct udmabuf_create_list head;
317fbb0de79SGerd Hoffmann struct udmabuf_create_item *list;
318fbb0de79SGerd Hoffmann int ret = -EINVAL;
319fbb0de79SGerd Hoffmann u32 lsize;
320fbb0de79SGerd Hoffmann
321fbb0de79SGerd Hoffmann if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
322fbb0de79SGerd Hoffmann return -EFAULT;
323dc4716d7SGerd Hoffmann if (head.count > list_limit)
324fbb0de79SGerd Hoffmann return -EINVAL;
325fbb0de79SGerd Hoffmann lsize = sizeof(struct udmabuf_create_item) * head.count;
326fbb0de79SGerd Hoffmann list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
327fbb0de79SGerd Hoffmann if (IS_ERR(list))
328fbb0de79SGerd Hoffmann return PTR_ERR(list);
329fbb0de79SGerd Hoffmann
330c1bbed66SGurchetan Singh ret = udmabuf_create(filp->private_data, &head, list);
331fbb0de79SGerd Hoffmann kfree(list);
332fbb0de79SGerd Hoffmann return ret;
333fbb0de79SGerd Hoffmann }
334fbb0de79SGerd Hoffmann
udmabuf_ioctl(struct file * filp,unsigned int ioctl,unsigned long arg)335fbb0de79SGerd Hoffmann static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
336fbb0de79SGerd Hoffmann unsigned long arg)
337fbb0de79SGerd Hoffmann {
338fbb0de79SGerd Hoffmann long ret;
339fbb0de79SGerd Hoffmann
340fbb0de79SGerd Hoffmann switch (ioctl) {
341fbb0de79SGerd Hoffmann case UDMABUF_CREATE:
342fbb0de79SGerd Hoffmann ret = udmabuf_ioctl_create(filp, arg);
343fbb0de79SGerd Hoffmann break;
344fbb0de79SGerd Hoffmann case UDMABUF_CREATE_LIST:
345fbb0de79SGerd Hoffmann ret = udmabuf_ioctl_create_list(filp, arg);
346fbb0de79SGerd Hoffmann break;
347fbb0de79SGerd Hoffmann default:
34852499d9cSGerd Hoffmann ret = -ENOTTY;
349fbb0de79SGerd Hoffmann break;
350fbb0de79SGerd Hoffmann }
351fbb0de79SGerd Hoffmann return ret;
352fbb0de79SGerd Hoffmann }
353fbb0de79SGerd Hoffmann
/* File operations for /dev/udmabuf; all work happens through ioctls. */
static const struct file_operations udmabuf_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = udmabuf_ioctl,
#ifdef CONFIG_COMPAT
	/* The ioctl ABI is identical for 32-bit callers. */
	.compat_ioctl = udmabuf_ioctl,
#endif
};
361fbb0de79SGerd Hoffmann
/* The /dev/udmabuf character device. */
static struct miscdevice udmabuf_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "udmabuf",
	.fops = &udmabuf_fops,
};
367fbb0de79SGerd Hoffmann
udmabuf_dev_init(void)368fbb0de79SGerd Hoffmann static int __init udmabuf_dev_init(void)
369fbb0de79SGerd Hoffmann {
3709e9fa6a9SVivek Kasireddy int ret;
3719e9fa6a9SVivek Kasireddy
3729e9fa6a9SVivek Kasireddy ret = misc_register(&udmabuf_misc);
3739e9fa6a9SVivek Kasireddy if (ret < 0) {
3749e9fa6a9SVivek Kasireddy pr_err("Could not initialize udmabuf device\n");
3759e9fa6a9SVivek Kasireddy return ret;
3769e9fa6a9SVivek Kasireddy }
3779e9fa6a9SVivek Kasireddy
3789e9fa6a9SVivek Kasireddy ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
3799e9fa6a9SVivek Kasireddy DMA_BIT_MASK(64));
3809e9fa6a9SVivek Kasireddy if (ret < 0) {
3819e9fa6a9SVivek Kasireddy pr_err("Could not setup DMA mask for udmabuf device\n");
3829e9fa6a9SVivek Kasireddy misc_deregister(&udmabuf_misc);
3839e9fa6a9SVivek Kasireddy return ret;
3849e9fa6a9SVivek Kasireddy }
3859e9fa6a9SVivek Kasireddy
3869e9fa6a9SVivek Kasireddy return 0;
387fbb0de79SGerd Hoffmann }
388fbb0de79SGerd Hoffmann
/* Module exit: remove /dev/udmabuf. */
static void __exit udmabuf_dev_exit(void)
{
	misc_deregister(&udmabuf_misc);
}
393fbb0de79SGerd Hoffmann
394fbb0de79SGerd Hoffmann module_init(udmabuf_dev_init)
395fbb0de79SGerd Hoffmann module_exit(udmabuf_dev_exit)
396fbb0de79SGerd Hoffmann
397fbb0de79SGerd Hoffmann MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
398