// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
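
/* Note (added commentary): both accessors use the physical-bypass ASI
 * (ASI_PHYS_BYPASS_EC_E), so the IOMMU control registers are read and
 * written by physical address, with no kernel virtual mapping required.
 */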

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
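
/* Worked example (added commentary): IOPTE_CONSISTENT(5) produces a PTE
 * with IOPTE_VALID | IOPTE_CACHE set and context 5 encoded in the
 * IOPTE_CONTEXT field starting at bit 47; IOPTE_STREAMING(5) additionally
 * sets IOPTE_STBUF so accesses are routed through the streaming buffer.
 */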

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map.  */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;
	memset(iommu->tbl.map, 0, sz);

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}
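
/* Illustrative usage (a sketch, not from the original file): a bus
 * controller driver would initialize the table for a 32-bit DMA window
 * roughly like this; "tsbsize", "dma_offset" and "numa_node" are
 * hypothetical placeholders:
 *
 *	err = iommu_table_init(iommu, tsbsize, dma_offset,
 *			       0xffffffffU, numa_node);
 *	if (err)
 *		return err;
 */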

static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
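
/* Illustrative usage (taken from the callers below): allocation happens
 * under iommu->lock, and context 0 doubles as the "no context" fallback
 * when the bitmap is exhausted -- it is never set in the bitmap, and
 * iommu_free_ctx() ignores it:
 *
 *	spin_lock_irqsave(&iommu->lock, flags);
 *	ctx = 0;
 *	if (iommu->iommu_ctxflush)
 *		ctx = iommu_alloc_ctx(iommu);
 *	spin_unlock_irqrestore(&iommu->lock, flags);
 */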

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
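
/* Illustrative usage (a sketch, not from the original file): drivers
 * reach this through the generic DMA API.  The returned CPU pointer and
 * the bus address refer to the same physical pages, mapped consistent
 * (no streaming buffer):
 *
 *	void *buf = dma_alloc_coherent(&pdev->dev, 4096, &busa, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * "pdev" and "busa" are hypothetical names.
 */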

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return SPARC_MAPPING_ERROR;
}
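
/* Illustrative usage (a sketch, not from the original file): a driver
 * reaches dma_4u_map_page() through the generic DMA API and must check
 * for mapping failure:
 *
 *	dma_addr_t busa = dma_map_page(&pdev->dev, page, 0, len,
 *				       DMA_FROM_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busa))
 *		return -ENOMEM;
 *
 * "pdev", "page" and "len" are hypothetical names.
 */

/* Flush dirty data out of the streaming cache for a range of IO pages.
 * When both the streaming buffer and the IOMMU support context
 * flushing, a context-match flush covers every entry tagged with "ctx"
 * (re-issued per matching entry if the first pass leaves stragglers);
 * otherwise each page is flushed individually.  Unless the transfer was
 * DMA_TO_DEVICE, completion is then confirmed through the flush-flag
 * handshake: the flag word is cleared, a flush-sync is issued, and the
 * hardware sets the flag once the flush has landed.
 */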
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
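
/* Note (added commentary): the teardown order above matters.  The
 * streaming cache is flushed while the IOPTEs still point at the real
 * pages (Step 1), the entries are then redirected to the dummy page
 * (Step 2), and only after the lock is dropped is the range handed
 * back to the allocator for reuse.
 */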

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the new dma_addr isn't contiguous with the
			 *   previous allocation,
			 * - merging would exceed the device's max segment
			 *   size, or
			 * - the combined segment would cross a segment
			 *   boundary.
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = SPARC_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_address = SPARC_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
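
/* Illustrative usage (a sketch, not from the original file): because of
 * the merge logic above, a driver may get fewer segments back than it
 * passed in, and must iterate over the returned count only:
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, mapped, i)
 *		hw_queue_buf(sg_dma_address(sg), sg_dma_len(sg));
 *
 * "hw_queue_buf" is a hypothetical device-programming helper.
 */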

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == SPARC_MAPPING_ERROR;
}

static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (device_mask > DMA_BIT_MASK(32))
		return 0;
	if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
		return 1;
#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
	return 0;
}

static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
	.mapping_error		= dma_4u_mapping_error,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
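
/* Design note (added commentary, not from the original file): the sun4u
 * ops above are the default; sun4v (hypervisor) platforms install their
 * own dma_map_ops at boot.  Drivers do not call these functions
 * directly -- they reach them through the generic dma_map_*() API,
 * which dispatches through this table.
 */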