// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This could be sized dynamically, but we will do so only when we
 * have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
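
/*
 * A quick sanity check on the sizing (assuming the usual 4 KB sparc32
 * page size and a 4-byte iopte_t): a 256 MB window in 4 KB pages needs
 * 64K ioptes; 64K * 4 bytes is a 256 KB table, i.e. 64 pages, hence
 * order 6 for __get_free_pages() below.
 */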

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
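/*
 * With 4 KB pages, (pfn << 8) equals (physical address >> 4), the same
 * ">> 4" scaling used for the page table base register below, so
 * IOPTE_PAGE ends up holding the page's physical address >> 4.  The
 * final mask clears the IOPTE_WAZ (apparently "write as zero") bits.
 */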

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/*
	 * Stupid alignment constraints give me a headache.
	 * We need a 256K, 512K, 1M or 2M area aligned to its size, and
	 * __get_free_pages() fortunately hands back blocks that are
	 * naturally aligned to their size.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
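	/*
	 * E.g. a 256 KB virtually indexed cache with 4 KB pages gives
	 * 64 colors; iommu_get_one() and sbus_iommu_alloc() below pass
	 * a color derived from the physical page so that DVMA and
	 * physical colors line up.
	 */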
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

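/*
 * As a subsys initcall this should run after the platform devices for the
 * "iommu" nodes exist but before ordinary device driver initcalls, so
 * drivers can rely on dev->archdata.iommu being set up by probe time.
 */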
subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

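/*
 * Reserve @npages consecutive slots in the DVMA window, point their
 * ioptes at the given pages and return the bus address of the first
 * slot.  The color handed to bit_map_string_get() is the pfn, which
 * keeps DVMA and physical page colors in step on HyperSparc.
 */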
static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		page++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

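/*
 * Common helper for the map_page paths: round the buffer out to whole
 * pages and map it through the IOMMU.  For example, a (hypothetical)
 * 0x2000-byte buffer starting at offset 0x100 into its first page spans
 * three pages, so npages is 3 and the caller gets back busa + 0x100.
 */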
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len)
{
	void *vaddr = page_address(page) + offset;
	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;
	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
}

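/*
 * Two cache flushing strategies feed the helper above: the "gflush"
 * variants rely on flush_page_for_dma(0) flushing the entire cache in
 * one go, while the "pflush" variants walk the buffer and flush it page
 * by page.  ld_mmu_iommu() below decides which set of ops is installed.
 */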
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;

	while (p < (unsigned long)vaddr + len) {
		flush_page_for_dma(p);
		p += PAGE_SIZE;
	}

	return __sbus_iommu_map_page(dev, page, offset, len);
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i, n;

	flush_page_for_dma(0);

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long page, oldpage = 0;
	struct scatterlist *sg;
	int i, j, n;

	for_each_sg(sgl, sg, nents, j) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;

		/*
		 * We expect unmapped highmem pages not to be in the cache.
		 * XXX Is this a good assumption?
		 * XXX What if someone else unmaps it here and races us?
		 */
		page = (unsigned long)page_address(sg_page(sg));
		if (page) {
			for (i = 0; i < n; i++) {
				if (page != oldpage) {	/* Already flushed? */
					flush_page_for_dma(page);
					oldpage = page;
				}
				page += PAGE_SIZE;
			}
		}

		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
		sg->dma_length = sg->length;
	}

	return nents;
}

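/*
 * Undo iommu_get_one(): clear the ioptes, drop each bus address from the
 * IOMMU TLB and hand the slots back to the allocation bitmap.
 */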
static void iommu_release_one(struct device *dev, u32 busa, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	int i;

	BUG_ON(busa < iommu->start);
	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	unsigned long off = dma_addr & ~PAGE_MASK;
	int npages;

	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i, n;

	for_each_sg(sgl, sg, nents, i) {
		n = (sg->length + sg->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
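/*
 * Consistent (dma_alloc_coherent) backend: grab zeroed pages, carve a
 * chunk out of the DVMA virtual area, remap the pages there with
 * dvma_prot and install matching ioptes with ioperm_noc, both of which
 * drop the cacheable bit on CPUs that are not I/O coherent (see
 * ld_mmu_iommu() below).
 */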
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
			       dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif
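
/*
 * A typical SBus driver does not call the routines above directly; it
 * goes through the generic DMA API, which dispatches to whichever ops
 * table ld_mmu_iommu() installed.  Roughly (illustrative only):
 *
 *	void *buf = dma_alloc_coherent(&op->dev, size, &dvma, GFP_KERNEL);
 *	dma_addr_t ba = dma_map_single(&op->dev, data, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_single(&op->dev, ba, len, DMA_TO_DEVICE);
 *	dma_free_coherent(&op->dev, size, buf, dvma);
 */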

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma() flushes the whole cache, regardless of which page it is given */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}