xref: /openbmc/linux/arch/x86/include/asm/dma-mapping.h (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
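
/*
 * A streaming mapping can fail, so callers should test the returned
 * handle with dma_mapping_error() before handing it to hardware.
 * A minimal sketch, assuming a hypothetical device "dev", buffer "buf"
 * and length "len":
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */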

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(hwdev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
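
/*
 * Typical single-buffer streaming lifetime: map, let the device DMA,
 * then unmap so the CPU may touch the buffer again. A sketch under
 * the assumption of a hypothetical kmalloc()ed "buf" handed to a
 * hypothetical start_device_tx() helper:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto err;
 *	start_device_tx(dev, handle, len);
 *	...wait for the device to signal completion...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */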

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
	debug_dma_map_sg(hwdev, sg, nents, ents, dir);

	return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(hwdev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
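
/*
 * dma_map_sg() may coalesce entries (e.g. through an IOMMU), so the
 * returned count, not the original nents, bounds what is programmed
 * into the hardware; the unmap still takes the original nents. A
 * sketch, assuming a hypothetical list "sglist" and a hypothetical
 * program_hw_entry() helper:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		goto err;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_entry(dev, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */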

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}
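
/*
 * For a long-lived streaming mapping, ownership ping-pongs between
 * CPU and device via the sync calls instead of repeated map/unmap.
 * A sketch, reusing a mapping "handle" of a hypothetical RX buffer:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...CPU may now read the data the device wrote...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...buffer handed back to the device for the next receive...
 */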

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
					    offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
					       offset, size, dir);
	flush_write_buffers();
}
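
/*
 * The _range variants sync only part of a mapping, which is cheaper
 * when the device touched just a slice of a large buffer. A sketch,
 * assuming the device wrote "pkt_len" bytes at offset 0 of a mapped
 * ring slot:
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, pkt_len,
 *				      DMA_FROM_DEVICE);
 */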

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
	flush_write_buffers();
}
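
/*
 * The sg sync calls mirror the single-buffer ones, covering every
 * mapped element of the list. A sketch for a receive completed into
 * a previously mapped, hypothetical "sglist":
 *
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 *	...CPU consumes the data...
 *	dma_sync_sg_for_device(dev, sglist, nents, DMA_FROM_DEVICE);
 */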

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
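
/*
 * dma_map_page() is the page-based primitive that dma_map_single()
 * is built on, and the one to use for highmem pages, which have no
 * permanent kernel virtual address to feed virt_to_page(). A sketch
 * with a hypothetical "page" (e.g. from alloc_page()):
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, PAGE_SIZE,
 *					 DMA_BIDIRECTIONAL);
 *	if (dma_mapping_error(dev, handle))
 *		goto err;
 *	...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_BIDIRECTIONAL);
 */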

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
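
/*
 * Worked example of the zone selection above: a device whose
 * coherent_dma_mask is DMA_32BIT_MASK (0xffffffff) gets GFP_DMA32 on
 * x86-64, so the allocation lands below 4GB; a mask of DMA_24BIT_MASK
 * (0xffffff) forces GFP_DMA and memory below 16MB, as old ISA-style
 * devices require; a larger mask adds no zone flag at all.
 */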

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
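
/*
 * Coherent memory needs no sync calls: CPU and device see each
 * other's writes immediately. Typical lifetime, for a hypothetical
 * descriptor ring "ring" of RING_BYTES bytes:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		goto err;
 *	...program the device with ring_dma, use ring from the CPU...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 *
 * Note that dma_free_coherent() must not be called with interrupts
 * disabled, hence the WARN_ON(irqs_disabled()) above.
 */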

#endif