xref: /openbmc/linux/arch/arm/xen/mm.c (revision 60772e48)
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

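/*
 * Allocate pages for the swiotlb bounce buffer.  If any RAM region starts
 * below the 32-bit boundary, request GFP_DMA so the buffer stays
 * addressable by devices with a 32-bit DMA mask.
 */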
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

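/*
 * Perform cache maintenance on [handle + offset, handle + offset + size),
 * one Xen page at a time, asking the hypervisor to clean or invalidate
 * each chunk via the GNTTABOP_cache_flush grant-table operation.
 */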
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long xen_pfn;
	size_t left = size;

	xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
	offset %= XEN_PAGE_SIZE;

	do {
		size_t len = left;

		/*
		 * Buffers in highmem or foreign pages cannot cross page
		 * boundaries.
		 */
		if (len + offset > XEN_PAGE_SIZE)
			len = XEN_PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		xen_pfn++;
		left -= len;
	} while (left);
}

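/* Invalidate CPU caches after the device may have written to the buffer. */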
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

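/*
 * Clean (or, for DMA_FROM_DEVICE, invalidate) CPU caches before the device
 * accesses the buffer.
 */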
static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

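/*
 * Cache maintenance for the map side of a streaming DMA mapping.  Nothing
 * to do for coherent devices or when the caller asked to skip CPU sync.
 */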
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, unsigned long attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

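/*
 * Cache maintenance for the unmap side of a streaming DMA mapping, subject
 * to the same coherency and DMA_ATTR_SKIP_CPU_SYNC checks as the map side.
 */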
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

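/* Hand a mapped buffer back to the CPU: invalidate stale cache lines. */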
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

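/* Hand a buffer back to the device: clean (or invalidate) the CPU cache. */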
void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

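/*
 * Tell the swiotlb-xen code whether this buffer must be bounced; see the
 * comment in the body for the detailed reasoning.
 */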
bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dev_addr);

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible to have a mix of local and foreign Xen pages.  Furthermore,
	 * range_straddles_page_boundary already checks whether the buffer is
	 * physically contiguous in host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!is_device_dma_coherent(dev));
}

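/*
 * On Arm, dom0 is assumed to be mapped 1:1 (pfn == mfn), so there is no
 * need to exchange pages with the hypervisor to obtain a machine-contiguous
 * region: the physical address can be handed straight to the device.
 */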
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

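/* Nothing to undo: xen_create_contiguous_region() did not exchange any pages. */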
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

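/* DMA ops pointer used when the kernel runs as Xen dom0; set in xen_mm_init(). */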
const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

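/*
 * For dom0, initialise the Xen swiotlb and install the swiotlb-xen DMA ops,
 * then probe for the GNTTABOP_cache_flush hypercall by issuing a no-op
 * flush: any result other than -ENOSYS means the hypervisor supports it.
 */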
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);