// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to guest physical addresses and
 * vice-versa, and also a mechanism to provide contiguous pages for
 * device driver operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
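
/*
 * Illustrative sketch (hypothetical frame numbers, not taken from any real
 * guest): two consecutive guest PFNs can be backed by machine frames that
 * are neither adjacent nor below 4GB,
 *
 *	guest PFN 0x1000 -> MFN 0x23a871	(machine addr 0x23a871000)
 *	guest PFN 0x1001 -> MFN 0x10f002	(not MFN 0x23a872)
 *
 * so every address handed to a device must go through pfn_to_bfn()/
 * phys_to_dma(), and buffers whose machine frames are not contiguous (or
 * not reachable by the device) are bounced through the swiotlb pool.
 */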

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Quick lookup helpers for converting a physical address to the
 * corresponding bus (machine) and DMA addresses, and back.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
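
/*
 * A minimal worked example of the helpers above, assuming XEN_PAGE_SHIFT
 * is 12 and a hypothetical mapping pfn_to_bfn(0x1234) == 0x9abc:
 *
 *	paddr = 0x1234567;
 *	bfn   = pfn_to_bfn(XEN_PFN_DOWN(paddr));	// 0x9abc
 *	baddr = ((phys_addr_t)bfn << XEN_PAGE_SHIFT)	// 0x9abc000
 *		| (paddr & ~XEN_PAGE_MASK);		// | 0x567 = 0x9abc567
 *
 * xen_bus_to_phys() is the exact inverse, and the *_dma() variants apply
 * the device's phys/dma offset on top via phys_to_dma()/dma_to_phys().
 */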

static inline bool range_requires_alignment(phys_addr_t p, size_t size)
{
	phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
	phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;

	return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}
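
/*
 * Example of what range_straddles_page_boundary() catches (hypothetical
 * frame numbers): a two-page buffer at p = 0x5000 with size = 0x2000
 * covers guest frames 0x5 and 0x6.  If pfn_to_bfn(0x5) == 0x80 but
 * pfn_to_bfn(0x6) == 0x31, the machine view of the buffer is not
 * contiguous, the function returns 1, and the caller must either bounce
 * the buffer or exchange it for a contiguous machine region before DMA.
 */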

static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/*
	 * If the address is outside our domain, it CAN have the same virtual
	 * address as another address in our domain. Therefore _only_ check
	 * addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(dev, paddr);
	return 0;
}

#ifdef CONFIG_X86
int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}
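
/*
 * Note on the retry loop above: xen_create_contiguous_region() asks the
 * hypervisor to exchange a chunk's frames for machine-contiguous ones
 * addressable with at most dma_bits bits.  Machine memory that low may
 * simply not be available to this domain, so on failure the constraint
 * is relaxed one bit at a time up to MAX_DMA_BITS before giving up.
 */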

static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	u64 dma_mask = dev->coherent_dma_mask;
	int order = get_order(size);
	phys_addr_t phys;
	void *ret;

	/* Align the allocation to the Xen page size */
	size = ALIGN(size, XEN_PAGE_SIZE);

	ret = (void *)__get_free_pages(flags, get_order(size));
	if (!ret)
		return ret;
	phys = virt_to_phys(ret);

	*dma_handle = xen_phys_to_dma(dev, phys);
	if (*dma_handle + size - 1 > dma_mask ||
	    range_straddles_page_boundary(phys, size) ||
	    range_requires_alignment(phys, size)) {
		if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
				dma_handle) != 0)
			goto out_free_pages;
		SetPageXenRemapped(virt_to_page(ret));
	}

	memset(ret, 0, size);
	return ret;

out_free_pages:
	free_pages((unsigned long)ret, get_order(size));
	return NULL;
}
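
/*
 * Consumer-side sketch (hypothetical driver code, not part of this file):
 * the allocator above is only reached through the generic DMA API, e.g.
 *
 *	dma_addr_t dma;
 *	void *cpu_addr = dma_alloc_coherent(&pdev->dev, 4096, &dma,
 *					    GFP_KERNEL);
 *
 * If the pages returned by __get_free_pages() already form a suitable
 * machine-contiguous, mask-reachable region they are used as-is;
 * otherwise they are exchanged with the hypervisor and flagged
 * PageXenRemapped so xen_swiotlb_free_coherent() knows to undo the swap.
 */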

static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t phys = virt_to_phys(vaddr);
	int order = get_order(size);

	/* Convert the size to what was actually allocated. */
	size = ALIGN(size, XEN_PAGE_SIZE);

	if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
	    WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
			 range_requires_alignment(phys, size)))
		return;

	if (TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);
	free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * DMA address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size);

	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}
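
/*
 * Streaming-mapping sketch from a hypothetical driver's point of view
 * (dev/page/len are placeholders, not names from this file):
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ... let the device DMA from the buffer ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 *
 * On Xen the map call ends up here: the buffer is used in place when its
 * machine frames are contiguous and reachable by the device, and bounced
 * through the swiotlb pool otherwise.
 */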

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}
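
/*
 * Scatter-gather usage sketch (hypothetical caller, per the DMA API
 * conventions):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (!count)
 *		return -EIO;
 *	// program the device with sg_dma_address()/sg_dma_len() of the
 *	// mapped entries, then:
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * This implementation maps each entry individually, so one failing entry
 * unwinds everything mapped so far before reporting the failure.
 */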

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}
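
/*
 * Mask-negotiation sketch (hypothetical driver): dma_set_mask() consults
 * this callback, so a 32-bit-only device is accepted as long as the end
 * of the bounce pool is reachable through that mask:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// e.g. the bounce pool ends above 4GB
 *
 * The swiotlb limit is the relevant bound because any buffer this backend
 * cannot map directly is bounced into that pool.
 */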

const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
#else
	.alloc = dma_direct_alloc,
	.free = dma_direct_free,
#endif
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.max_mapping_size = swiotlb_max_mapping_size,
};