xref: /openbmc/linux/drivers/xen/grant-dma-ops.c (revision acf50233)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

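/* Per-device grant DMA data, keyed by the struct device pointer */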
static DEFINE_XARRAY(xen_grant_dma_devices);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

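/*
 * Bit 63 of a DMA address marks it as grant-based; the remaining bits
 * carry the grant reference shifted up by PAGE_SHIFT.
 *
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): grant
 * reference 0x1234 maps to the DMA address 0x8000000001234000, and
 * dma_to_grant() recovers 0x1234 by clearing bit 63 and shifting back
 * down by PAGE_SHIFT.
 */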
static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

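/* Look up the device's grant DMA data; returns NULL if none was registered */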
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;

	xa_lock(&xen_grant_dma_devices);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock(&xen_grant_dma_devices);

	return data;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit allows the backend
 * to distinguish it from e.g. an MMIO address).
 */
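/*
 * Allocate physically contiguous pages, grab a contiguous run of grant
 * references and grant the backend domain read/write access to each page.
 * The returned DMA handle encodes the first grant reference of the run.
 */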
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

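/*
 * Revoke the backend's access and return the grant references to the
 * allocator. If the backend still holds a grant, mark the device as
 * broken and leave the pages allocated rather than free memory the
 * backend may still access.
 */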
static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}

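/* The page-based variants simply wrap the vaddr-based alloc/free helpers */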
static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

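/*
 * Grant the backend access to each page backing the buffer; the grant is
 * read-only when the direction is DMA_TO_DEVICE. The intra-page offset is
 * carried in the low bits of the returned DMA address.
 */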
static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + offset;

	return dma_handle;
}

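/*
 * End foreign access for every grant of the mapping and return the grant
 * references to the allocator; if the backend still holds a grant, the
 * device is marked broken.
 */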
static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

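/* Unmap each scatterlist segment individually */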
static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

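/*
 * Map each scatterlist segment with its own run of grant references and
 * unmap the already-mapped segments if any of them fails.
 */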
static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

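/*
 * Only a full 64-bit DMA mask can be supported, since bit 63 of the DMA
 * address is reserved to tag grant-based addresses.
 */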
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

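/*
 * A device uses grant DMA if its "iommus" DT property references a node
 * compatible with "xen,grant-dma".
 */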
bool xen_is_grant_dma_device(struct device *dev)
{
	struct device_node *iommu_np;
	bool has_iommu;

	/* XXX Handle only DT devices for now */
	if (!dev->of_node)
		return false;

	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
	has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma");
	of_node_put(iommu_np);

	return has_iommu;
}

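/*
 * Parse the device's "iommus" specifier (expected to carry exactly one
 * cell, the backend domid), store the per-device data and install the
 * grant DMA ops for the device.
 */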
void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;
	struct of_phandle_args iommu_spec;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	/* XXX ACPI devices are unsupported for now */
	if (!dev->of_node)
		goto err;

	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
		dev_err(dev, "Cannot parse iommus property\n");
		goto err;
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_err(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		goto err;
	}

	of_node_put(iommu_spec.np);

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	/*
	 * The endpoint ID here means the ID of the domain where the corresponding
	 * backend is running
	 */
	data->backend_domid = iommu_spec.args[0];

	if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_KERNEL))) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");