// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This could be sized dynamically, but we will do so
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
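
/*
 * The arithmetic behind the constants above (assuming 4 KB pages and
 * 4-byte ioptes, as the comments indicate): a 256 MB DVMA window holds
 * 256 MB / 4 KB = 64K ioptes, i.e. a 64K * 4 B = 256 KB page table,
 * which is exactly an order-6 page allocation (4096 B << 6 == 256 KB).
 */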

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
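
/*
 * MKIOPTE packs a page frame number into the IOPTE_PAGE field: with
 * 4 KB pages, pfn << 8 equals the physical address shifted right by 4,
 * the same scaling the hardware uses for the base register written in
 * sbus_iommu_init() below.  The permission bits are ORed in and the
 * must-be-zero (IOPTE_WAZ) bits are cleared.
 */
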
56 
57 static void __init sbus_iommu_init(struct platform_device *op)
58 {
59 	struct iommu_struct *iommu;
60 	unsigned int impl, vers;
61 	unsigned long *bitmap;
62 	unsigned long control;
63 	unsigned long base;
64 	unsigned long tmp;
65 
66 	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
67 	if (!iommu) {
68 		prom_printf("Unable to allocate iommu structure\n");
69 		prom_halt();
70 	}
71 
72 	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
73 				 "iommu_regs");
74 	if (!iommu->regs) {
75 		prom_printf("Cannot map IOMMU registers\n");
76 		prom_halt();
77 	}
78 
79 	control = sbus_readl(&iommu->regs->control);
80 	impl = (control & IOMMU_CTRL_IMPL) >> 28;
81 	vers = (control & IOMMU_CTRL_VERS) >> 24;
82 	control &= ~(IOMMU_CTRL_RNGE);
83 	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
84 	sbus_writel(control, &iommu->regs->control);
85 
86 	iommu_invalidate(iommu->regs);
87 	iommu->start = IOMMU_START;
88 	iommu->end = 0xffffffff;
89 
90 	/* Allocate IOMMU page table */
	/*
	 * Stupid alignment constraints give me a headache.
	 * We need a 256K, 512K, 1M or 2M area aligned to its size,
	 * and the current gfp allocator fortunately gives it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

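	/*
	 * The base register takes the table's physical address shifted
	 * right by 4 (the same scaling as MKIOPTE above); the order-6,
	 * naturally aligned allocation guarantees the alignment the
	 * hardware expects.
	 */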
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
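	/*
	 * Illustrative numbers: with a 256 KB virtually addressed cache
	 * and 4 KB pages this yields 256 KB >> 12 = 64 colors, so the
	 * bitmap allocator only hands out DVMA pages whose low-order
	 * page number bits match those of the physical page.
	 */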

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/*
 * Flush the iotlb entries to ram.
 * This could be better if we didn't have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

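/*
 * Map npages consecutive struct pages starting at page into the IOMMU
 * and return the DVMA bus address of the first one.  The bitmap
 * allocator picks a run of iopte slots whose color matches the page's
 * pfn, and each newly written iopte is flushed from the hardware iotlb.
 */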
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

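/*
 * The rounding below copes with buffers that straddle page boundaries:
 * e.g. a 16-byte buffer starting 8 bytes before a page boundary has
 * off + len = 0xFF8 + 0x10, which rounds up to npages = 2.
 */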
static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
{
	unsigned long off;
	int npages;
	struct page *page;
	u32 busa;

	off = (unsigned long)vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
	busa = iommu_get_one(dev, page, npages);
	return busa + off;
}

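/*
 * Two cache-flushing strategies feed the same mapping path: where
 * flush_page_for_dma(0) flushes the whole cache, one global flush is
 * enough (the _gflush variants below); elsewhere every page covered
 * by the transfer must be flushed individually (the _pflush variants).
 */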
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
	flush_page_for_dma(0);
	return iommu_get_scsi_one(dev, vaddr, len);
}

static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
{
	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;

	while (page < ((unsigned long)(vaddr + len))) {
		flush_page_for_dma(page);
		page += PAGE_SIZE;
	}
	return iommu_get_scsi_one(dev, vaddr, len);
}

static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	flush_page_for_dma(0);
	while (sz != 0) {
		--sz;
		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
{
	unsigned long page, oldpage = 0;
	int n, i;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages to be not in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		page = (unsigned long)page_address(sg_page(sg));
		if (page) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
}

static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	unsigned long off;
	int npages;

	off = vaddr & ~PAGE_MASK;
	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
}

static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	int n;

	while (sz != 0) {
		--sz;

		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
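		/* Poison the stale handle (0x21 is '!', so it reads "!!!!"). */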
		sg->dma_address = 0x21212121;
		sg = sg_next(sg);
	}
}

#ifdef CONFIG_SBUS
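/*
 * Map a consistent DMA area: len bytes of kernel memory at va are
 * remapped at addr with dvma_prot (non-cacheable on most CPUs) and
 * entered into the IOMMU page table with the ioperm_noc flags, and the
 * resulting DVMA base address is returned through pba.
 */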
static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
			      unsigned long addr, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long page, end;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*pba = iommu->start + (ioptex << PAGE_SHIFT);
	return 0;
}

static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	unsigned long end;
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
}
#endif

static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_gflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
	.get_scsi_one		= iommu_get_scsi_one_pflush,
	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
	.release_scsi_one	= iommu_release_scsi_one,
	.release_scsi_sgl	= iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iommu_map_dma_area,
	.unmap_dma_area		= iommu_unmap_dma_area,
#endif
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, no matter which page it is */
		sparc32_dma_ops = &iommu_dma_gflush_ops;
	} else {
		sparc32_dma_ops = &iommu_dma_pflush_ops;
	}

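	/*
	 * Consistent mappings may stay cacheable on Viking/MXCC and on
	 * HyperSparc, presumably because the MXCC keeps I/O coherent and
	 * HyperSparc relies on the DVMA/physical page-color matching set
	 * up in sbus_iommu_init(); every other CPU gets non-cacheable
	 * consistent mappings.
	 */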
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}