/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "mock_dmabuf.h"

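/*
 * Wrap the mock's pages in a freshly allocated scatterlist and DMA-map
 * it for the attaching device; one sg entry per page, no coalescing.
 */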
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

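	/* dma_map_sg() returns the number of entries mapped; 0 means failure */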
	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}

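/* Undo mock_map_dma_buf(): unmap the sg list, then free it. */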
static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
			       struct sg_table *st,
			       enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
	sg_free_table(st);
	kfree(st);
}

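/* Called when the last reference to the dma-buf is dropped. */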
static void mock_dmabuf_release(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	int i;

	for (i = 0; i < mock->npages; i++)
		put_page(mock->pages[i]);

	kfree(mock);
}

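/* Map all backing pages into one contiguous kernel virtual range. */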
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
}

static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}

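/* Per-page kernel mappings for the dma-buf kmap interface. */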
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return kmap(mock->pages[page_num]);
}

static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	kunmap(mock->pages[page_num]);
}

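/* Userspace mmap is deliberately unsupported by the mock. */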
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -ENODEV;
}

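/* The minimal set of dma_buf_ops needed to exercise the importer paths. */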
static const struct dma_buf_ops mock_dmabuf_ops = {
	.map_dma_buf = mock_map_dma_buf,
	.unmap_dma_buf = mock_unmap_dma_buf,
	.release = mock_dmabuf_release,
	.map = mock_dmabuf_kmap,
	.unmap = mock_dmabuf_kunmap,
	.mmap = mock_dmabuf_mmap,
	.vmap = mock_dmabuf_vmap,
	.vunmap = mock_dmabuf_vunmap,
};

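/*
 * Create a dma-buf backed by @npages freshly allocated pages. Returns the
 * exported dma-buf or an ERR_PTR on failure.
 */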
static struct dma_buf *mock_dmabuf(int npages)
{
	struct mock_dmabuf *mock;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int i;

	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
		       GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);

	mock->npages = npages;
	for (i = 0; i < npages; i++) {
		mock->pages[i] = alloc_page(GFP_KERNEL);
		if (!mock->pages[i]) {
			dmabuf = ERR_PTR(-ENOMEM);
			goto err;
		}
	}

	exp_info.ops = &mock_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = mock;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		goto err;

	return dmabuf;

err:
	/* Only the first i pages were allocated; release them in reverse. */
	while (i--)
		put_page(mock->pages[i]);
	kfree(mock);
	return dmabuf;
}

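/*
 * Usage sketch (illustrative only, not part of the selftest): exercising the
 * exporter through the core dma-buf API. "dev" is a placeholder for whatever
 * struct device the test harness supplies; error handling is abbreviated.
 */
#if 0
static int mock_dmabuf_example(struct device *dev)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *st;
	int err = 0;

	dmabuf = mock_dmabuf(4);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto out_put;
	}

	st = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(st)) {
		err = PTR_ERR(st);
		goto out_detach;
	}

	dma_buf_unmap_attachment(attach, st, DMA_BIDIRECTIONAL);
out_detach:
	dma_buf_detach(dmabuf, attach);
out_put:
	dma_buf_put(dmabuf);
	return err;
}
#endif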