// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen grant DMA-mapping layer - contains special DMA-mapping routines
 * for providing grant references as DMA addresses to be used by frontends
 * (e.g. virtio) in Xen guests
 *
 * Copyright (c) 2021, Juergen Gross <jgross@suse.com>
 */

#include <linux/module.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/xarray.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/grant_table.h>

struct xen_grant_dma_data {
	/* The ID of the backend domain */
	domid_t backend_domid;
	/* Is the device behaving sanely? */
	bool broken;
};

static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);

#define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)

static inline dma_addr_t grant_to_dma(grant_ref_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT);
}

static inline grant_ref_t dma_to_grant(dma_addr_t dma)
{
	return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT);
}

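/*
 * Worked example (illustrative): with XEN_PAGE_SHIFT == 12, grant
 * reference 0x1234 encodes as
 *
 *	grant_to_dma(0x1234) == (1ULL << 63) | (0x1234ULL << 12)
 *			     == 0x8000000001234000
 *
 * and dma_to_grant() inverts this: masking off bit 63 and shifting right
 * by XEN_PAGE_SHIFT recovers 0x1234.
 */
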
static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
{
	struct xen_grant_dma_data *data;
	unsigned long flags;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return data;
}

static int store_xen_grant_dma_data(struct device *dev,
				    struct xen_grant_dma_data *data)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&xen_grant_dma_devices, flags);
	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
			GFP_ATOMIC));
	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);

	return ret;
}

/*
 * DMA ops for Xen frontends (e.g. virtio).
 *
 * Used to act as a kind of software IOMMU for Xen guests by using grants as
 * DMA addresses.
 * Such a DMA address is formed by using the grant reference as a frame
 * number and setting the highest address bit (this bit allows the backend
 * to distinguish it from e.g. an MMIO address).
 */
static void *xen_grant_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	unsigned long pfn;
	grant_ref_t grant;
	void *ret;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return NULL;

	if (unlikely(data->broken))
		return NULL;

	ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp);
	if (!ret)
		return NULL;

	pfn = virt_to_pfn(ret);

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) {
		free_pages_exact(ret, n_pages * XEN_PAGE_SIZE);
		return NULL;
	}

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(pfn + i), 0);
	}

	*dma_handle = grant_to_dma(grant);

	return ret;
}

static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle, unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned int i, n_pages = XEN_PFN_UP(size);
	grant_ref_t grant;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);

	free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE);
}

static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size,
					      dma_addr_t *dma_handle,
					      enum dma_data_direction dir,
					      gfp_t gfp)
{
	void *vaddr;

	vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0);
	if (!vaddr)
		return NULL;

	return virt_to_page(vaddr);
}

static void xen_grant_dma_free_pages(struct device *dev, size_t size,
				     struct page *vaddr, dma_addr_t dma_handle,
				     enum dma_data_direction dir)
{
	xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0);
}

static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(offset),
			pfn_offset = XEN_PFN_DOWN(offset);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;
	dma_addr_t dma_handle;

	if (WARN_ON(dir == DMA_NONE))
		return DMA_MAPPING_ERROR;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return DMA_MAPPING_ERROR;

	if (unlikely(data->broken))
		return DMA_MAPPING_ERROR;

	if (gnttab_alloc_grant_reference_seq(n_pages, &grant))
		return DMA_MAPPING_ERROR;

	for (i = 0; i < n_pages; i++) {
		gnttab_grant_foreign_access_ref(grant + i, data->backend_domid,
				pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset),
				dir == DMA_TO_DEVICE);
	}

	dma_handle = grant_to_dma(grant) + dma_offset;

	return dma_handle;
}

static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct xen_grant_dma_data *data;
	unsigned long dma_offset = xen_offset_in_page(dma_handle);
	unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size);
	grant_ref_t grant;

	if (WARN_ON(dir == DMA_NONE))
		return;

	data = find_xen_grant_dma_data(dev);
	if (!data)
		return;

	if (unlikely(data->broken))
		return;

	grant = dma_to_grant(dma_handle);

	for (i = 0; i < n_pages; i++) {
		if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) {
			dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n");
			data->broken = true;
			return;
		}
	}

	gnttab_free_grant_reference_seq(grant, n_pages);
}

static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return;

	for_each_sg(sg, s, nents, i)
		xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir,
				attrs);
}

static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	struct scatterlist *s;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return -EINVAL;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset,
				s->length, dir, attrs);
		if (s->dma_address == DMA_MAPPING_ERROR)
			goto out;

		sg_dma_len(s) = s->length;
	}

	return nents;

out:
	xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sg) = 0;

	return -EIO;
}

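/*
 * Only a full 64-bit DMA mask is supported: bit 63 of every grant-based
 * DMA address is set (XEN_GRANT_DMA_ADDR_OFF), so a device that cannot
 * address all 64 bits would never see valid handles.
 */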
static int xen_grant_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static const struct dma_map_ops xen_grant_dma_ops = {
	.alloc = xen_grant_dma_alloc,
	.free = xen_grant_dma_free,
	.alloc_pages = xen_grant_dma_alloc_pages,
	.free_pages = xen_grant_dma_free_pages,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.map_page = xen_grant_dma_map_page,
	.unmap_page = xen_grant_dma_unmap_page,
	.map_sg = xen_grant_dma_map_sg,
	.unmap_sg = xen_grant_dma_unmap_sg,
	.dma_supported = xen_grant_dma_supported,
};

static struct device_node *xen_dt_get_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct pci_bus *bus = pdev->bus;

		/* Walk up to the root bus to look for the PCI host controller */
		while (!pci_is_root_bus(bus))
			bus = bus->parent;

		if (!bus->bridge->parent)
			return NULL;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int xen_dt_grant_init_backend_domid(struct device *dev,
					   struct device_node *np,
					   domid_t *backend_domid)
{
	struct of_phandle_args iommu_spec = { .args_count = 1 };

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);

		if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
				iommu_spec.args)) {
			dev_dbg(dev, "Cannot translate ID\n");
			return -ESRCH;
		}
	} else {
		if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
				0, &iommu_spec)) {
			dev_dbg(dev, "Cannot parse iommus property\n");
			return -ESRCH;
		}
	}

	if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
			iommu_spec.args_count != 1) {
		dev_dbg(dev, "Incompatible IOMMU node\n");
		of_node_put(iommu_spec.np);
		return -ESRCH;
	}

	of_node_put(iommu_spec.np);

	/*
	 * The endpoint ID here means the ID of the domain where the
	 * corresponding backend is running
	 */
	*backend_domid = iommu_spec.args[0];

	return 0;
}

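/*
 * Illustrative device tree fragment (node names and values are examples):
 * a frontend node references a "xen,grant-dma" IOMMU node, and the single
 * iommu-specifier cell carries the backend domain ID extracted above:
 *
 *	xen_dma: xen-grant-dma {
 *		compatible = "xen,grant-dma";
 *		#iommu-cells = <1>;
 *	};
 *
 *	virtio@2000000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x2000000 0x200>;
 *		iommus = <&xen_dma 1>;	// backend runs in domain 1
 *	};
 */
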
static int xen_grant_init_backend_domid(struct device *dev,
					domid_t *backend_domid)
{
	struct device_node *np;
	int ret = -ENODEV;

	np = xen_dt_get_node(dev);
	if (np) {
		ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
		of_node_put(np);
	} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
		dev_info(dev, "Using dom0 as backend\n");
		*backend_domid = 0;
		ret = 0;
	}

	return ret;
}

static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
{
	struct xen_grant_dma_data *data;

	data = find_xen_grant_dma_data(dev);
	if (data) {
		dev_err(dev, "Xen grant DMA data is already created\n");
		return;
	}

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;

	data->backend_domid = backend_domid;

	if (store_xen_grant_dma_data(dev, data)) {
		dev_err(dev, "Cannot store Xen grant DMA data\n");
		goto err;
	}

	dev->dma_ops = &xen_grant_dma_ops;

	return;

err:
	devm_kfree(dev, data);
	dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n");
}

bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
{
	domid_t backend_domid;

	if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
		xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
		return true;
	}

	return false;
}

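/*
 * Illustrative wiring (a sketch; the registration lives in arch/platform
 * setup code, not in this file): the callback above is hooked into virtio
 * core via virtio_set_mem_acc_cb() from <linux/virtio_anchor.h>, e.g.:
 *
 *	if (xen_domain())
 *		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
 *
 * Virtio core then invokes it per device, and the callback installs the
 * grant DMA ops whenever a backend domid can be determined.
 */
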
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
MODULE_LICENSE("GPL");