/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

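/* Illustrative sketch (not part of the original file): how these macros
 * combine with a page-aligned physical address when a PTE is written, as
 * dma_4u_map_page() does below.  The context value 5 and the variable
 * 'paddr' are hypothetical.
 */
#if 0
	iopte_val(*iopte) = IOPTE_STREAMING(5) | (paddr & IOPTE_PAGE);
#endif
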
/* Existing mappings are never marked invalid; instead they
 * are pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely on the ppc64 iommu allocator.  Users of the
 * 'handle' facility must perform all of their allocations in a single
 * pass while holding the iommu lock.
 *
 * On sun4u platforms we only flush the IOMMU each time we have passed
 * over the entire page table doing allocations.  Therefore we only ever
 * advance the hint and never backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
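
/* Sketch (assumed usage, mirroring dma_4u_map_sg() below): allocating a
 * run of scatterlist segments in one pass under the iommu lock, threading
 * the 'handle' hint through successive calls so each allocation starts
 * where the previous one ended.  The per-segment 'npages' computation and
 * the 'unwind' backout path are elided and hypothetical.
 */
#if 0
	unsigned long handle = 0, entry, flags;
	struct scatterlist *s;
	int i;

	spin_lock_irqsave(&iommu->lock, flags);
	for_each_sg(sglist, s, nelems, i) {
		entry = iommu_range_alloc(dev, iommu, npages, &handle);
		if (entry == DMA_ERROR_CODE)
			goto unwind;
		/* ... program iommu->page_table[entry .. entry + npages - 1] ... */
	}
	spin_unlock_irqrestore(&iommu->lock, flags);
#endif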

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}
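
/* Sketch (hypothetical values): how a bus driver such as the sun4u PCI
 * controller code might set up a table at probe time.  The table size and
 * DVMA offset below are illustrative, not taken from any real probe path.
 */
#if 0
	err = iommu_table_init(iommu, 128 * 1024 * sizeof(iopte_t),
			       0xc0000000, 0xffffffff, numa_node);
	if (err)
		return err;
#endif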

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   struct dma_attrs *attrs)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
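
/* Sketch (assumed driver usage): this routine is reached through the
 * generic DMA API rather than called directly.  'pdev' and the buffer
 * size are hypothetical.
 */
#if 0
	void *cpu;
	dma_addr_t dma;

	cpu = dma_alloc_coherent(&pdev->dev, 8192, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;
	/* ... hand 'dma' to the device, access the buffer through 'cpu' ... */
	dma_free_coherent(&pdev->dev, 8192, cpu, dma);
#endif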

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 struct dma_attrs *attrs)
{
	struct iommu *iommu;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}
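
/* Sketch (assumed driver usage): a streaming mapping made and torn down
 * through the generic DMA API, which lands in dma_4u_map_page() above and
 * dma_4u_unmap_page() below on sun4u.  'dev', 'page' and 'len' are
 * hypothetical.
 */
#if 0
	dma_addr_t dma;

	dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... device reads the buffer via 'dma' here ... */
	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
#endif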

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
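
/* Sketch (assumed driver usage): scatter-gather mapping through the
 * generic DMA API.  Because dma_4u_map_sg() merges adjacent segments, it
 * may return fewer DMA segments than the 'nelems' entries passed in, so
 * the driver iterates over the returned count but unmaps with the
 * original count.  'dev', 'sglist' and 'nelems' are hypothetical.
 */
#if 0
	struct scatterlist *sg;
	int count, i;

	count = dma_map_sg(dev, sglist, nelems, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;
	for_each_sg(sglist, sg, count, i) {
		/* ... program sg_dma_address(sg) / sg_dma_len(sg) ... */
	}
	dma_unmap_sg(dev, sglist, nelems, DMA_TO_DEVICE);
#endif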

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
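
/* Sketch (assumed driver usage): before the CPU reads a buffer the device
 * has DMA'd into, the streaming cache must be flushed through the generic
 * sync call, which lands in dma_4u_sync_single_for_cpu() above on sun4u.
 * 'dev', 'dma' and 'len' are hypothetical.
 */
#if 0
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the buffer ... */
#endif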

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);