xref: /openbmc/linux/arch/xtensa/kernel/pci-dma.c (revision c75959a6)
15a0015d6SChris Zankel /*
25a0015d6SChris Zankel  * DMA coherent memory allocation.
35a0015d6SChris Zankel  *
45a0015d6SChris Zankel  * This program is free software; you can redistribute  it and/or modify it
55a0015d6SChris Zankel  * under  the terms of  the GNU General  Public License as published by the
65a0015d6SChris Zankel  * Free Software Foundation;  either version 2 of the  License, or (at your
75a0015d6SChris Zankel  * option) any later version.
85a0015d6SChris Zankel  *
95a0015d6SChris Zankel  * Copyright (C) 2002 - 2005 Tensilica Inc.
10c75959a6SMax Filippov  * Copyright (C) 2015 Cadence Design Systems Inc.
115a0015d6SChris Zankel  *
125a0015d6SChris Zankel  * Based on version for i386.
135a0015d6SChris Zankel  *
145a0015d6SChris Zankel  * Chris Zankel <chris@zankel.net>
155a0015d6SChris Zankel  * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
165a0015d6SChris Zankel  */
175a0015d6SChris Zankel 
185a0015d6SChris Zankel #include <linux/types.h>
195a0015d6SChris Zankel #include <linux/mm.h>
205a0015d6SChris Zankel #include <linux/string.h>
215a0015d6SChris Zankel #include <linux/pci.h>
225a0e3ad6STejun Heo #include <linux/gfp.h>
23d3738f40SMax Filippov #include <linux/module.h>
245a0015d6SChris Zankel #include <asm/io.h>
255a0015d6SChris Zankel #include <asm/cacheflush.h>
265a0015d6SChris Zankel 
27c75959a6SMax Filippov void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
28c75959a6SMax Filippov 		    enum dma_data_direction dir)
29c75959a6SMax Filippov {
30c75959a6SMax Filippov 	switch (dir) {
31c75959a6SMax Filippov 	case DMA_BIDIRECTIONAL:
32c75959a6SMax Filippov 		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
33c75959a6SMax Filippov 		break;
34c75959a6SMax Filippov 
35c75959a6SMax Filippov 	case DMA_FROM_DEVICE:
36c75959a6SMax Filippov 		__invalidate_dcache_range((unsigned long)vaddr, size);
37c75959a6SMax Filippov 		break;
38c75959a6SMax Filippov 
39c75959a6SMax Filippov 	case DMA_TO_DEVICE:
40c75959a6SMax Filippov 		__flush_dcache_range((unsigned long)vaddr, size);
41c75959a6SMax Filippov 		break;
42c75959a6SMax Filippov 
43c75959a6SMax Filippov 	case DMA_NONE:
44c75959a6SMax Filippov 		BUG();
45c75959a6SMax Filippov 		break;
46c75959a6SMax Filippov 	}
47c75959a6SMax Filippov }
48c75959a6SMax Filippov EXPORT_SYMBOL(dma_cache_sync);
49c75959a6SMax Filippov 
50c75959a6SMax Filippov static void xtensa_sync_single_for_cpu(struct device *dev,
51c75959a6SMax Filippov 				       dma_addr_t dma_handle, size_t size,
52c75959a6SMax Filippov 				       enum dma_data_direction dir)
53c75959a6SMax Filippov {
54c75959a6SMax Filippov 	void *vaddr;
55c75959a6SMax Filippov 
56c75959a6SMax Filippov 	switch (dir) {
57c75959a6SMax Filippov 	case DMA_BIDIRECTIONAL:
58c75959a6SMax Filippov 	case DMA_FROM_DEVICE:
59c75959a6SMax Filippov 		vaddr = bus_to_virt(dma_handle);
60c75959a6SMax Filippov 		__invalidate_dcache_range((unsigned long)vaddr, size);
61c75959a6SMax Filippov 		break;
62c75959a6SMax Filippov 
63c75959a6SMax Filippov 	case DMA_NONE:
64c75959a6SMax Filippov 		BUG();
65c75959a6SMax Filippov 		break;
66c75959a6SMax Filippov 
67c75959a6SMax Filippov 	default:
68c75959a6SMax Filippov 		break;
69c75959a6SMax Filippov 	}
70c75959a6SMax Filippov }
71c75959a6SMax Filippov 
72c75959a6SMax Filippov static void xtensa_sync_single_for_device(struct device *dev,
73c75959a6SMax Filippov 					  dma_addr_t dma_handle, size_t size,
74c75959a6SMax Filippov 					  enum dma_data_direction dir)
75c75959a6SMax Filippov {
76c75959a6SMax Filippov 	void *vaddr;
77c75959a6SMax Filippov 
78c75959a6SMax Filippov 	switch (dir) {
79c75959a6SMax Filippov 	case DMA_BIDIRECTIONAL:
80c75959a6SMax Filippov 	case DMA_TO_DEVICE:
81c75959a6SMax Filippov 		vaddr = bus_to_virt(dma_handle);
82c75959a6SMax Filippov 		__flush_dcache_range((unsigned long)vaddr, size);
83c75959a6SMax Filippov 		break;
84c75959a6SMax Filippov 
85c75959a6SMax Filippov 	case DMA_NONE:
86c75959a6SMax Filippov 		BUG();
87c75959a6SMax Filippov 		break;
88c75959a6SMax Filippov 
89c75959a6SMax Filippov 	default:
90c75959a6SMax Filippov 		break;
91c75959a6SMax Filippov 	}
92c75959a6SMax Filippov }
93c75959a6SMax Filippov 
94c75959a6SMax Filippov static void xtensa_sync_sg_for_cpu(struct device *dev,
95c75959a6SMax Filippov 				   struct scatterlist *sg, int nents,
96c75959a6SMax Filippov 				   enum dma_data_direction dir)
97c75959a6SMax Filippov {
98c75959a6SMax Filippov 	struct scatterlist *s;
99c75959a6SMax Filippov 	int i;
100c75959a6SMax Filippov 
101c75959a6SMax Filippov 	for_each_sg(sg, s, nents, i) {
102c75959a6SMax Filippov 		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
103c75959a6SMax Filippov 					   sg_dma_len(s), dir);
104c75959a6SMax Filippov 	}
105c75959a6SMax Filippov }
106c75959a6SMax Filippov 
107c75959a6SMax Filippov static void xtensa_sync_sg_for_device(struct device *dev,
108c75959a6SMax Filippov 				      struct scatterlist *sg, int nents,
109c75959a6SMax Filippov 				      enum dma_data_direction dir)
110c75959a6SMax Filippov {
111c75959a6SMax Filippov 	struct scatterlist *s;
112c75959a6SMax Filippov 	int i;
113c75959a6SMax Filippov 
114c75959a6SMax Filippov 	for_each_sg(sg, s, nents, i) {
115c75959a6SMax Filippov 		xtensa_sync_single_for_device(dev, sg_dma_address(s),
116c75959a6SMax Filippov 					      sg_dma_len(s), dir);
117c75959a6SMax Filippov 	}
118c75959a6SMax Filippov }
119c75959a6SMax Filippov 
1205a0015d6SChris Zankel /*
1215a0015d6SChris Zankel  * Note: We assume that the full memory space is always mapped to 'kseg'
1225a0015d6SChris Zankel  *	 Otherwise we have to use page attributes (not implemented).
1235a0015d6SChris Zankel  */
1245a0015d6SChris Zankel 
/*
 * Allocate coherent DMA memory.  Pages are taken from the cached KSEG
 * mapping, the cache lines covering them are invalidated, and the
 * matching uncached (bypass) alias is returned to the caller.
 * *handle receives the bus address of the buffer.  Returns NULL on
 * allocation failure.
 */
static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t flag,
			      struct dma_attrs *attrs)
{
	unsigned long ret;
	unsigned long uncached = 0;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Devices that cannot reach all of memory get ZONE_DMA pages. */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;
	ret = (unsigned long)__get_free_pages(flag, get_order(size));

	if (ret == 0)
		return NULL;

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	/* Translate the cached KSEG address to its uncached bypass alias. */
	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	/* Drop any cache lines covering the cached alias so later CPU
	 * reads through the bypass mapping don't race stale writebacks. */
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;
}
1545a0015d6SChris Zankel 
155c75959a6SMax Filippov static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
156c75959a6SMax Filippov 			    dma_addr_t dma_handle, struct dma_attrs *attrs)
1575a0015d6SChris Zankel {
1581ca49463SAlan Douglas 	unsigned long addr = (unsigned long)vaddr +
1591ca49463SAlan Douglas 		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
160173d6681SChris Zankel 
1611ca49463SAlan Douglas 	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
1621ca49463SAlan Douglas 	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
163173d6681SChris Zankel 
164173d6681SChris Zankel 	free_pages(addr, get_order(size));
1655a0015d6SChris Zankel }
1665a0015d6SChris Zankel 
167c75959a6SMax Filippov static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
168c75959a6SMax Filippov 				  unsigned long offset, size_t size,
169c75959a6SMax Filippov 				  enum dma_data_direction dir,
170c75959a6SMax Filippov 				  struct dma_attrs *attrs)
1715a0015d6SChris Zankel {
172c75959a6SMax Filippov 	dma_addr_t dma_handle = page_to_phys(page) + offset;
1735a0015d6SChris Zankel 
174c75959a6SMax Filippov 	BUG_ON(PageHighMem(page));
175c75959a6SMax Filippov 	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
176c75959a6SMax Filippov 	return dma_handle;
177c75959a6SMax Filippov }
178c75959a6SMax Filippov 
/*
 * Unmap a streaming DMA page: hand ownership of the buffer back to
 * the CPU by performing the for-CPU cache maintenance.
 */
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
185c75959a6SMax Filippov 
186c75959a6SMax Filippov static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
187c75959a6SMax Filippov 			 int nents, enum dma_data_direction dir,
188c75959a6SMax Filippov 			 struct dma_attrs *attrs)
189c75959a6SMax Filippov {
190c75959a6SMax Filippov 	struct scatterlist *s;
191c75959a6SMax Filippov 	int i;
192c75959a6SMax Filippov 
193c75959a6SMax Filippov 	for_each_sg(sg, s, nents, i) {
194c75959a6SMax Filippov 		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
195c75959a6SMax Filippov 						 s->length, dir, attrs);
196c75959a6SMax Filippov 	}
197c75959a6SMax Filippov 	return nents;
198c75959a6SMax Filippov }
199c75959a6SMax Filippov 
200c75959a6SMax Filippov static void xtensa_unmap_sg(struct device *dev,
201c75959a6SMax Filippov 			    struct scatterlist *sg, int nents,
202c75959a6SMax Filippov 			    enum dma_data_direction dir,
203c75959a6SMax Filippov 			    struct dma_attrs *attrs)
204c75959a6SMax Filippov {
205c75959a6SMax Filippov 	struct scatterlist *s;
206c75959a6SMax Filippov 	int i;
207c75959a6SMax Filippov 
208c75959a6SMax Filippov 	for_each_sg(sg, s, nents, i) {
209c75959a6SMax Filippov 		xtensa_unmap_page(dev, sg_dma_address(s),
210c75959a6SMax Filippov 				  sg_dma_len(s), dir, attrs);
2115a0015d6SChris Zankel 	}
2125a0015d6SChris Zankel }
213c75959a6SMax Filippov 
/*
 * DMA mappings on this platform are direct (no IOMMU) and cannot
 * fail, so no bus address is ever an error value.
 */
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
218c75959a6SMax Filippov 
219c75959a6SMax Filippov struct dma_map_ops xtensa_dma_map_ops = {
220c75959a6SMax Filippov 	.alloc = xtensa_dma_alloc,
221c75959a6SMax Filippov 	.free = xtensa_dma_free,
222c75959a6SMax Filippov 	.map_page = xtensa_map_page,
223c75959a6SMax Filippov 	.unmap_page = xtensa_unmap_page,
224c75959a6SMax Filippov 	.map_sg = xtensa_map_sg,
225c75959a6SMax Filippov 	.unmap_sg = xtensa_unmap_sg,
226c75959a6SMax Filippov 	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
227c75959a6SMax Filippov 	.sync_single_for_device = xtensa_sync_single_for_device,
228c75959a6SMax Filippov 	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
229c75959a6SMax Filippov 	.sync_sg_for_device = xtensa_sync_sg_for_device,
230c75959a6SMax Filippov 	.mapping_error = xtensa_dma_mapping_error,
231c75959a6SMax Filippov };
232c75959a6SMax Filippov EXPORT_SYMBOL(xtensa_dma_map_ops);
233c75959a6SMax Filippov 
/* Number of dma-debug tracking entries preallocated at boot (64K). */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

/* Initialize the DMA-API debugging facility early in boot. */
static int __init xtensa_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(xtensa_dma_init);
242