xref: /openbmc/linux/drivers/xen/grant-dma-ops.c (revision 14386d47)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

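/*
 * Conversion helpers between a grant reference and the grant-based DMA
 * address handed to the backend: bit 63 is set and the grant reference is
 * used as the frame number. For example, assuming 4 KiB pages
 * (PAGE_SHIFT == 12), grant reference 0x5 encodes to the DMA address
 * 0x8000000000005000.
 */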
static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

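/*
 * Per-device state lives in the xen_grant_dma_devices xarray, keyed by the
 * struct device pointer. The xarray lock is taken with interrupts disabled
 * (XA_FLAGS_LOCK_IRQ) and stores use GFP_ATOMIC, so the helpers below are
 * usable from the atomic contexts the DMA API may be called from.
 */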
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;
	unsigned long flags;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return data;
}

static int store_xen_grant_dma_data(struct device *dev,
				    struct xen_grant_dma_data *data)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_ATOMIC));
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return ret;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * These act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit lets the backend
 * distinguish it from e.g. an MMIO address).
 */
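/*
 * Coherent allocation path: allocate physically contiguous pages, reserve a
 * consecutive run of grant references, grant the backend domain read/write
 * access to each page, and return a DMA handle encoding the first grant.
 */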
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

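/*
 * Coherent free path: end the backend's foreign access to every page before
 * releasing the grant references and the pages. If the backend still holds
 * a grant, the device is marked broken and the memory is intentionally
 * leaked instead of being returned while foreign access remains possible.
 */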
static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * PAGE_SIZE);
}

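/* Page-based wrappers around the coherent alloc/free helpers above */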
static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

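/*
 * Streaming mapping: one grant reference is allocated for each page spanned
 * by the buffer. For DMA_TO_DEVICE the pages are granted read-only,
 * otherwise read/write. The offset into the first page is preserved in the
 * returned DMA handle.
 */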
static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = PFN_UP(offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + offset;

	return dma_handle;
}

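/*
 * Tear down a streaming mapping: end foreign access for each page and free
 * the grant references. As in xen_grant_dma_free(), a grant still in use by
 * the backend marks the device broken and the references are not released.
 */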
static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long offset = dma_handle & (PAGE_SIZE - 1);
	unsigned int i, n_pages = PFN_UP(offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

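/* The scatter-gather variants simply map/unmap each segment individually */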
static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

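/*
 * If mapping a segment fails, the segments already mapped are unmapped
 * again and the first segment's DMA length is cleared so the caller sees a
 * clean failure.
 */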
static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

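/*
 * Grant-based DMA addresses always have bit 63 set, so only devices capable
 * of full 64-bit addressing can be supported.
 */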
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

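/*
 * A device is handled by this layer if its device tree node has an "iommus"
 * phandle pointing at a node compatible with "xen,grant-dma".
 */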
static bool xen_is_dt_grant_dma_device(struct device *dev)
{
	struct device_node *iommu_np;
	bool has_iommu;

	iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
	has_iommu = iommu_np &&
		    of_device_is_compatible(iommu_np, "xen,grant-dma");
	of_node_put(iommu_np);

	return has_iommu;
}

bool xen_is_grant_dma_device(struct device *dev)
{
	/* XXX Handle only DT devices for now */
	if (dev->of_node)
		return xen_is_dt_grant_dma_device(dev);

	return false;
}

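/*
 * Report whether memory access for a virtio device must be restricted:
 * always when CONFIG_XEN_VIRTIO_FORCE_GRANT is enabled or when running as a
 * PV guest, otherwise only when the device is described as a grant DMA
 * device above.
 */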
bool xen_virtio_mem_acc(struct virtio_device *dev)
{
	if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
		return true;

	return xen_is_grant_dma_device(dev->dev.parent);
}

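/*
 * The backend domain ID is taken from the single argument cell of the
 * device's "iommus" specifier, e.g. a property along the lines of
 * "iommus = <&gnt 1>;" (label name illustrative) selects domain 1 as the
 * backend.
 */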
static int xen_dt_grant_init_backend_domid(struct device *dev,
					   struct xen_grant_dma_data *data)
{
	struct of_phandle_args iommu_spec;

	if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
			0, &iommu_spec)) {
		dev_err(dev, "Cannot parse iommus property\n");
		return -ESRCH;
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_err(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		return -ESRCH;
	}

	of_node_put(iommu_spec.np);

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	data->backend_domid = iommu_spec.args[0];

	return 0;
}

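/*
 * Install the grant DMA ops on a device. The backend domid comes from the
 * device tree when available; otherwise dom0 is assumed if
 * CONFIG_XEN_VIRTIO_FORCE_GRANT is enabled. On any failure the device keeps
 * its platform DMA ops.
 */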
void xen_grant_setup_dma_ops(struct device *dev)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	if (dev->of_node) {
		if (xen_dt_grant_init_backend_domid(dev, data))
			goto err;
	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
		dev_info(dev, "Using dom0 as backend\n");
		data->backend_domid = 0;
	} else {
		/* XXX ACPI devices are unsupported for now */
		goto err;
	}

	if (store_xen_grant_dma_data(dev, data)) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	devm_kfree(dev, data);
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

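/*
 * Combined helper for virtio transports: if restricted memory access is
 * required for this virtio device, switch its parent (the transport device)
 * to the grant DMA ops before reporting the result.
 */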
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	bool ret = xen_virtio_mem_acc(dev);

	if (ret)
		xen_grant_setup_dma_ops(dev->dev.parent);

	return ret;
}

MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");