xref: /openbmc/linux/arch/mips/mm/dma-noncoherent.c (revision 87c2ce3b)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * A note on terminology: Linux calls an uncached memory area "coherent",
 * while MIPS terminology reserves "coherent" for memory areas whose
 * coherency is maintained by hardware.
 */
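
/*
 * On 32-bit MIPS the same physical memory is visible through two fixed
 * kernel segments: KSEG0, which is accessed through the cache, and
 * KSEG1, which is uncached.  UNCAC_ADDR() and CAC_ADDR() convert a
 * kernel virtual address between the two views; dma_alloc_coherent()
 * below uses this to hand out an uncached alias of an ordinary cached
 * allocation.
 */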

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/*
	 * Devices that cannot reach the full 32-bit address range must
	 * be served from ZONE_DMA.
	 */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		/* No IOMMU here: the bus address is the physical address. */
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);
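
/*
 * Example (a minimal, hypothetical driver sketch, not part of the
 * original file): a noncoherent buffer stays cached, so the caller must
 * sync it explicitly around each transfer, e.g. with dma_cache_sync()
 * as defined further down:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_noncoherent(dev, 4096, &handle, GFP_KERNEL);
 *
 *	if (buf) {
 *		... fill buf with data for the device ...
 *		dma_cache_sync(buf, 4096, DMA_TO_DEVICE);
 *		... point the device at "handle" and run the transfer ...
 *		dma_free_noncoherent(dev, 4096, buf, handle);
 *	}
 */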

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		/*
		 * Flush any stale cache lines covering the buffer, then
		 * return the uncached (KSEG1) alias so that further CPU
		 * accesses bypass the cache entirely.
		 */
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);
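
/*
 * Unlike the noncoherent variant above, a buffer from dma_alloc_coherent()
 * needs no explicit syncing: the CPU uses the uncached alias while the
 * device sees memory directly.  It must be returned through
 * dma_free_coherent(), which converts the alias back before freeing.
 */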

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	/* Convert the uncached alias back to its cached (KSEG0) address. */
	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

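/*
 * Bring the caches into agreement with memory for a transfer in the
 * given direction: CPU writes must reach memory before the device
 * reads it (writeback), and stale lines must be discarded before the
 * CPU reads data the device has written (invalidate).
 */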
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		/* The CPU wrote the buffer; push it out to memory. */
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		/* The device will write the buffer; discard stale lines. */
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		/* Both of the above. */
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	/* Without an IOMMU the DMA address is just the physical address. */
	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);
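
/*
 * Example (a hypothetical sketch, not part of the original file):
 * streaming DMA maps an existing kernel buffer for a single transfer
 * and unmaps it afterwards, instead of allocating a dedicated buffer:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand "handle" to the device and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * Here "buf" and "len" stand in for any kernel buffer owned by the
 * caller.
 */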

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	/*
	 * All cache maintenance was done at map time, so nothing is done
	 * here.  A post-DMA sync, if it were ever needed, would be:
	 *
	 *	__dma_sync(dma_addr + PAGE_OFFSET, size, direction);
	 */
}

EXPORT_SYMBOL(dma_unmap_single);

int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		/*
		 * page_address() is NULL for highmem pages, which have
		 * no kernel mapping that could hold stale cache lines.
		 */
		addr = (unsigned long) page_address(sg->page);
		if (addr) {
			__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
					  + sg->offset;
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);
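
/*
 * Example (a hypothetical sketch, not part of the original file):
 * a scatter-gather mapping syncs and translates every entry in one call:
 *
 *	struct scatterlist sg[2];
 *	int i, n;
 *
 *	... initialize sg[0] and sg[1] to point at the data pages ...
 *	n = dma_map_sg(dev, sg, 2, DMA_FROM_DEVICE);
 *	for (i = 0; i < n; i++)
 *		... program device segment i with sg[i].dma_address ...
 *	... run the transfer, then ...
 *	dma_unmap_sg(dev, sg, 2, DMA_FROM_DEVICE);
 */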

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	/* Conservatively writeback and invalidate whatever the direction. */
	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		/*
		 * The device may have written to the page; make sure the
		 * CPU does not read stale cache lines.
		 */
		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Nothing to do after a CPU-to-device transfer. */
	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

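/*
 * The sync pairs below implement the usual ownership handshake for
 * streaming mappings that are reused across several transfers:
 * dma_sync_*_for_cpu() before the CPU touches a mapped buffer, and
 * dma_sync_*_for_device() before handing it back to the device.
 * dma_handle + PAGE_OFFSET recovers the cached kernel virtual address
 * from the physical DMA address.
 */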
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Sync each entry, honouring its offset within the page. */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page) + sg->offset,
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Sync each entry, honouring its offset within the page. */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page) + sg->offset,
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	/* Mappings are plain physical addresses, so they cannot fail. */
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA supplies.  0x00ffffff is the classic 24-bit ISA-style
	 * DMA limit.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/*
 * The DAC (dual address cycle) routines are a PCIism allowing 64-bit
 * DMA addresses to be driven over a 32-bit bus.
 */

#ifdef CONFIG_PCI

#include <linux/pci.h>

dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */