/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
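
/*
 * A note on MKIOPTE(): the physical address is shifted right by 4 and
 * masked with IOUPTE_PAGE before the cache/write/valid permission bits
 * are ORed in.  iounit_get_area() below relies on this encoding when it
 * steps an iopte across consecutive pages by adding 0x100, which is one
 * PAGE_SIZE (4K on sparc32) shifted right by 4.
 */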
static void __init iounit_iommu_init(struct of_device *op)
{
	struct iounit_struct *iounit;
	iopte_t *xpt, *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
		iopte_val(*xpt++) = 0;
}
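
/*
 * Note on sizing: the XPT mapped above is 16 pages long; with the usual
 * sparc32 4K PAGE_SIZE and 4-byte ioptes that is 16384 entries, i.e. a
 * 64MB DVMA window starting at IOUNIT_DMA_BASE.  The code itself only
 * depends on PAGE_SIZE and sizeof(iopte_t), so these numbers are
 * illustrative.
 */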

static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct of_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);
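
/*
 * DVMA slot allocation.  The bitmap in iounit->bmap tracks XPT slots in
 * three pools: [BMAP1_START, BMAP2_START), [BMAP2_START, BMAPM_START)
 * and [BMAPM_START, BMAPM_END).  iounit_get_area() encodes the order in
 * which the pools are tried in a nibble-packed constant consumed low
 * nibble first: 0x0231 (one page) tries pool 1, then the multi-page
 * pool, then pool 2; 0x0132 (two pages) starts with pool 2; anything
 * larger (0x0213) starts with the multi-page pool.  Each pool keeps a
 * next-fit rotor, so the search resumes where the last allocation ended
 * and wraps around once before the pool is given up on.
 */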
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic: pick the pool search order by allocation size */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
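
/*
 * mmu_get_scsi_{one,sgl} hooks: these hand out bus (DVMA) addresses in
 * the IOUNIT_DMA_BASE window for streaming SCSI/SBUS buffers.  The
 * iounit lock is taken here rather than in iounit_get_area(), so the
 * sgl variant can map an entire scatterlist under a single lock
 * acquisition.
 */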
static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg->dvma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dvma_length = sg->length;
		sg = sg_next(sg);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}
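
/*
 * Releasing a mapping is the inverse of iounit_get_area(): the DVMA
 * address is converted back into an XPT slot index by subtracting
 * IOUNIT_DMA_BASE and shifting right by PAGE_SHIFT, and the covered
 * bits are cleared in the allocation bitmap.  The stale ioptes are not
 * cleared; they are simply overwritten the next time those slots are
 * allocated.
 */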
static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;
	unsigned long vaddr, len;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
		sg = sg_next(sg);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
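/*
 * mmu_map_dma_area hook, used on the SBUS consistent DMA path.  For
 * each page the DVMA address `addr' is wired up twice: a regular SRMMU
 * pte in init_mm maps the kernel virtual address addr to the physical
 * page backing va (cacheable, privileged), and the matching XPT slot
 * gets an iopte for the same physical page, so CPU and device reach the
 * same memory through the DVMA window.
 */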
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN(addr + len);
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = (iopte_t *)(iounit->page_table + i);
			*iopte = MKIOPTE(__pa(page));
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}

static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
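
/*
 * Boot-time wiring: ld_mmu_iounit() uses the sparc32 BTFIXUP mechanism
 * to patch the generic mmu_* entry points over to the IO-UNIT versions
 * above.  Since the lockarea/unlockarea hooks are stubs, their call
 * sites are patched to "return the first argument" (BTFIXUPCALL_RETO0)
 * and a no-op (BTFIXUPCALL_NOP) instead of real calls.
 */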
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}