xref: /openbmc/linux/arch/sparc/mm/io-unit.c (revision ebd09753)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * io-unit.c:  IO-UNIT specific routines for memory management.
4  *
5  * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/mm.h>
13 #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
14 #include <linux/bitops.h>
15 #include <linux/scatterlist.h>
16 #include <linux/of.h>
17 #include <linux/of_device.h>
18 
19 #include <asm/pgalloc.h>
20 #include <asm/pgtable.h>
21 #include <asm/io.h>
22 #include <asm/io-unit.h>
23 #include <asm/mxcc.h>
24 #include <asm/cacheflush.h>
25 #include <asm/tlbflush.h>
26 #include <asm/dma.h>
27 #include <asm/oplib.h>
28 
29 #include "mm_32.h"
30 
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
/* Debug trace; callers pass a double-parenthesized printk arg list: IOD(("fmt", ...)) */
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

/* Protection bits set in every IOPTE this driver creates. */
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
/* Build an IOPTE from a physical address: page field is phys >> 4, plus IOPERM. */
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
40 
41 static void __init iounit_iommu_init(struct platform_device *op)
42 {
43 	struct iounit_struct *iounit;
44 	iopte_t __iomem *xpt;
45 	iopte_t __iomem *xptend;
46 
47 	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
48 	if (!iounit) {
49 		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
50 		prom_halt();
51 	}
52 
53 	iounit->limit[0] = IOUNIT_BMAP1_START;
54 	iounit->limit[1] = IOUNIT_BMAP2_START;
55 	iounit->limit[2] = IOUNIT_BMAPM_START;
56 	iounit->limit[3] = IOUNIT_BMAPM_END;
57 	iounit->rotor[1] = IOUNIT_BMAP2_START;
58 	iounit->rotor[2] = IOUNIT_BMAPM_START;
59 
60 	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
61 	if (!xpt) {
62 		prom_printf("SUN4D: Cannot map External Page Table.");
63 		prom_halt();
64 	}
65 
66 	op->dev.archdata.iommu = iounit;
67 	iounit->page_table = xpt;
68 	spin_lock_init(&iounit->lock);
69 
70 	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
71 	for (; xpt < xptend; xpt++)
72 		sbus_writel(0, xpt);
73 }
74 
75 static int __init iounit_init(void)
76 {
77 	extern void sun4d_init_sbi_irq(void);
78 	struct device_node *dp;
79 
80 	for_each_node_by_name(dp, "sbi") {
81 		struct platform_device *op = of_find_device_by_node(dp);
82 
83 		iounit_iommu_init(op);
84 		of_propagate_archdata(op);
85 	}
86 
87 	sun4d_init_sbi_irq();
88 
89 	return 0;
90 }
91 
92 subsys_initcall(iounit_init);
93 
/*
 * Allocate a run of XPT slots covering @size bytes starting at kernel
 * virtual address @vaddr, program the matching IOPTEs, and return the
 * DVMA address to hand to the device.
 *
 * One has to hold iounit->lock to call this.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Pages needed, accounting for the sub-page offset of vaddr. */
        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredient :) -- each nibble of 'i', read
	 * low to high, names the next bitmap pool (1, 2 or 3) to try;
	 * the order is chosen by request size, and a zero nibble means
	 * all pools were exhausted. */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Select the pool named by the current low nibble. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
	/* Next-fit search: scan forward from the rotor toward the limit. */
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: retry from the pool start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* This pool is full; fall back to the next nibble's pool. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* First bit is free; verify the remaining npages-1 bits are too. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	/* Advance the rotor past this allocation, wrapping to pool start. */
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	/* Claim the slots and write one IOPTE per page; adding 0x100 steps
	 * the (phys >> 4) page field of MKIOPTE by exactly one 4K page. */
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
142 
143 static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
144 {
145 	struct iounit_struct *iounit = dev->archdata.iommu;
146 	unsigned long ret, flags;
147 
148 	spin_lock_irqsave(&iounit->lock, flags);
149 	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
150 	spin_unlock_irqrestore(&iounit->lock, flags);
151 	return ret;
152 }
153 
154 static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
155 {
156 	struct iounit_struct *iounit = dev->archdata.iommu;
157 	unsigned long flags;
158 
159 	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
160 	spin_lock_irqsave(&iounit->lock, flags);
161 	while (sz != 0) {
162 		--sz;
163 		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
164 		sg->dma_length = sg->length;
165 		sg = sg_next(sg);
166 	}
167 	spin_unlock_irqrestore(&iounit->lock, flags);
168 }
169 
170 static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
171 {
172 	struct iounit_struct *iounit = dev->archdata.iommu;
173 	unsigned long flags;
174 
175 	spin_lock_irqsave(&iounit->lock, flags);
176 	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
177 	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
178 	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
179 	for (len += vaddr; vaddr < len; vaddr++)
180 		clear_bit(vaddr, iounit->bmap);
181 	spin_unlock_irqrestore(&iounit->lock, flags);
182 }
183 
184 static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
185 {
186 	struct iounit_struct *iounit = dev->archdata.iommu;
187 	unsigned long flags;
188 	unsigned long vaddr, len;
189 
190 	spin_lock_irqsave(&iounit->lock, flags);
191 	while (sz != 0) {
192 		--sz;
193 		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
194 		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
195 		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
196 		for (len += vaddr; vaddr < len; vaddr++)
197 			clear_bit(vaddr, iounit->bmap);
198 		sg = sg_next(sg);
199 	}
200 	spin_unlock_irqrestore(&iounit->lock, flags);
201 }
202 
203 #ifdef CONFIG_SBUS
/*
 * Map a pre-allocated DVMA region for SBUS: for each page, insert a
 * kernel PTE for the buffer at @va into init_mm at address @addr, and
 * program the matching IOPTE so the device sees the same memory at the
 * DVMA address.  @addr is assumed to lie in the IOUNIT_DMA_BASE window
 * (it is used directly to index the XPT).  Returns 0.
 */
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* Report the bus address back to the caller (identical to addr). */
	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm down to the PTE for this DVMA address.
			 * NOTE(review): pte_offset_map may kmap_atomic (see the
			 * highmem.h include comment) and there is no matching
			 * pte_unmap here -- presumably safe on sparc32's setup;
			 * confirm before reusing this pattern. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			/* CPU-side mapping of the buffer page at the DVMA address. */
			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* Device-side mapping: XPT slot index from the DVMA offset. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* Make the new PTEs visible: flush caches and TLBs everywhere. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
242 
/*
 * Tear down a mapping made by iounit_map_dma_area().  Intentionally a
 * no-op so far -- mappings are leaked until someone implements this.
 */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
247 #endif
248 
/* sparc32 DMA operations backed by the SUN4D IO-UNIT. */
static const struct sparc32_dma_ops iounit_dma_ops = {
	.get_scsi_one		= iounit_get_scsi_one,
	.get_scsi_sgl		= iounit_get_scsi_sgl,
	.release_scsi_one	= iounit_release_scsi_one,
	.release_scsi_sgl	= iounit_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iounit_map_dma_area,
	.unmap_dma_area		= iounit_unmap_dma_area,
#endif
};
259 
/* Install the IO-UNIT implementation as the global sparc32 DMA backend. */
void __init ld_mmu_iounit(void)
{
	sparc32_dma_ops = &iounit_dma_ops;
}
264