/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/dma-attrs.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

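/*
 * Callbacks for walk_page_range(): each is invoked once per PTE in the
 * walked range and toggles the page's cache-inhibit (CI) bit.  When
 * setting CI we also flush the TLB entry and flush the page out of the
 * dcache so that no stale cached data survives.
 */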
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact(), sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       struct dma_attrs *attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}
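
/*
 * Illustrative usage, not part of this file: a driver obtains a
 * coherent buffer through the generic DMA API, which dispatches to the
 * alloc/free callbacks above via or1k_dma_map_ops.  'dev' and the size
 * here are hypothetical:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... device DMAs via 'handle'; the CPU accesses 'buf' uncached ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */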

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      struct dma_attrs *attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	/* Nothing special to do here... */
}
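
/*
 * Illustrative streaming usage, not part of this file: dma_map_single()
 * lands in or1k_map_page() above, which flushes (DMA_TO_DEVICE) or
 * invalidates (DMA_FROM_DEVICE) the dcache lines covering the buffer.
 * 'dev', 'buf' and 'len' are hypothetical:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */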

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, NULL);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
	}
}
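
/*
 * Illustrative scatter-gather usage, not part of this file; 'dev',
 * 'sglist' and 'nents' are hypothetical.  dma_map_sg() simply runs
 * each segment through or1k_map_page() above:
 *
 *	int count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... program the device using sg_dma_address()/sg_dma_len() ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */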

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}
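
/*
 * Illustrative sync handshake for a long-lived mapping, not part of
 * this file; 'dev', 'dma' and 'len' are hypothetical.  Ownership
 * alternates between CPU and device: invalidate before the CPU reads,
 * flush before the device reads.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU inspects or updates the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
 */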

struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);
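
/*
 * This ops structure is what get_dma_ops() returns for every device on
 * this architecture (see arch/openrisc/include/asm/dma-mapping.h), so
 * the generic dma_*() entry points all funnel into the callbacks above.
 */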

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

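/*
 * Registered as an fs_initcall so that the debug entries exist before
 * device-level initcalls start mapping DMA buffers.
 */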
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);