xref: /openbmc/linux/arch/sparc/mm/iommu.c (revision f25b23bc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * iommu.c:  IOMMU specific routines for memory management.
4  *
5  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
6  * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
7  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
8  * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
9  */
10 
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/mm.h>
14 #include <linux/slab.h>
15 #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
16 #include <linux/dma-mapping.h>
17 #include <linux/of.h>
18 #include <linux/of_device.h>
19 
20 #include <asm/pgalloc.h>
21 #include <asm/pgtable.h>
22 #include <asm/io.h>
23 #include <asm/mxcc.h>
24 #include <asm/mbus.h>
25 #include <asm/cacheflush.h>
26 #include <asm/tlbflush.h>
27 #include <asm/bitext.h>
28 #include <asm/iommu.h>
29 #include <asm/dma.h>
30 
31 #include "mm_32.h"
32 
33 /*
34  * This can be sized dynamically, but we will do so
35  * only once we have guidance about actual I/O pressures.
36  */
37 #define IOMMU_RNGE	IOMMU_RNGE_256MB
38 #define IOMMU_START	0xF0000000
39 #define IOMMU_WINSIZE	(256*1024*1024U)
40 #define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
41 #define IOMMU_ORDER	6				/* 4096 * (1<<6) */
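/*
 * For reference, assuming the usual sparc32 4 KB PAGE_SIZE and 4-byte
 * iopte_t, the arithmetic works out as:
 *
 *   IOMMU_NPTES = 256 MB / 4 KB           = 65536 PTEs
 *   table size  = 65536 * sizeof(iopte_t) = 256 KB
 *   IOMMU_ORDER = 6, i.e. 2^6 pages       = 64 * 4 KB = 256 KB
 *
 * so one order-6 allocation holds the whole page table, naturally aligned
 * to its own size as the hardware wants.
 */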
42 
43 static int viking_flush;
44 /* viking.S */
45 extern void viking_flush_page(unsigned long page);
46 extern void viking_mxcc_flush_page(unsigned long page);
47 
48 /*
49  * Values precomputed according to CPU type.
50  */
51 static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
52 static pgprot_t dvma_prot;		/* Consistent mapping pte flags */
53 
54 #define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
55 #define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
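/*
 * A rough sketch (not in the original file) of MKIOPTE() written out as a
 * function, to make the field packing explicit.  IOPERM is the flag set used
 * for streaming mappings; ioperm_noc above is the precomputed set for
 * consistent mappings.
 */
#if 0
static inline unsigned long mkiopte_sketch(unsigned long pfn, unsigned long perm)
{
	unsigned long pte = (pfn << 8) & IOPTE_PAGE;	/* physical page frame field */

	pte |= perm;					/* e.g. IOPERM or ioperm_noc */
	return pte & ~IOPTE_WAZ;			/* "write as zero" bits stay clear */
}
#endif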
56 
57 static void __init sbus_iommu_init(struct platform_device *op)
58 {
59 	struct iommu_struct *iommu;
60 	unsigned int impl, vers;
61 	unsigned long *bitmap;
62 	unsigned long control;
63 	unsigned long base;
64 	unsigned long tmp;
65 
66 	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
67 	if (!iommu) {
68 		prom_printf("Unable to allocate iommu structure\n");
69 		prom_halt();
70 	}
71 
72 	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
73 				 "iommu_regs");
74 	if (!iommu->regs) {
75 		prom_printf("Cannot map IOMMU registers\n");
76 		prom_halt();
77 	}
78 
79 	control = sbus_readl(&iommu->regs->control);
80 	impl = (control & IOMMU_CTRL_IMPL) >> 28;
81 	vers = (control & IOMMU_CTRL_VERS) >> 24;
82 	control &= ~(IOMMU_CTRL_RNGE);
83 	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
84 	sbus_writel(control, &iommu->regs->control);
85 
86 	iommu_invalidate(iommu->regs);
87 	iommu->start = IOMMU_START;
88 	iommu->end = 0xffffffff;
89 
90 	/* Allocate IOMMU page table */
91 	/* Stupid alignment constraints give me a headache.
92 	   We need a 256K, 512K, 1M or 2M area aligned to its
93 	   size, and the current gfp allocator fortunately
94 	   gives it to us. */
95 	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
96 	if (!tmp) {
97 		prom_printf("Unable to allocate iommu table [0x%lx]\n",
98 			    IOMMU_NPTES * sizeof(iopte_t));
99 		prom_halt();
100 	}
101 	iommu->page_table = (iopte_t *)tmp;
102 
103 	/* Initialize new table. */
104 	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
105 	flush_cache_all();
106 	flush_tlb_all();
107 
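	/* The base register takes the page table's physical address >> 4. */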
108 	base = __pa((unsigned long)iommu->page_table) >> 4;
109 	sbus_writel(base, &iommu->regs->base);
110 	iommu_invalidate(iommu->regs);
111 
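	/* One allocator bit per iopte: 64K PTEs need an 8 KB bitmap. */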
112 	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
113 	if (!bitmap) {
114 		prom_printf("Unable to allocate iommu bitmap [%d]\n",
115 			    (int)(IOMMU_NPTES>>3));
116 		prom_halt();
117 	}
118 	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
119 	/* To be coherent on HyperSparc, the page color of DVMA
120 	 * and physical addresses must match.
121 	 */
122 	if (srmmu_modtype == HyperSparc)
123 		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
124 	else
125 		iommu->usemap.num_colors = 1;
126 
127 	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
128 	       impl, vers, iommu->page_table,
129 	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
130 
131 	op->dev.archdata.iommu = iommu;
132 }
133 
134 static int __init iommu_init(void)
135 {
136 	struct device_node *dp;
137 
138 	for_each_node_by_name(dp, "iommu") {
139 		struct platform_device *op = of_find_device_by_node(dp);
140 
141 		sbus_iommu_init(op);
142 		of_propagate_archdata(op);
143 	}
144 
145 	return 0;
146 }
147 
148 subsys_initcall(iommu_init);
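/*
 * iommu_init() runs once at subsys_initcall time: it walks every "iommu"
 * node in the device tree, hangs an iommu_struct off the matching platform
 * device, and propagates that archdata to the node's children.  Which
 * dma_map_ops those devices end up with is decided in ld_mmu_iommu() at the
 * bottom of this file.
 */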
149 
150 /* Flush the iotlb entries to RAM. */
151 /* This could be better if we didn't have to flush whole pages. */
152 static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
153 {
154 	unsigned long start;
155 	unsigned long end;
156 
157 	start = (unsigned long)iopte;
158 	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
159 	start &= PAGE_MASK;
160 	if (viking_mxcc_present) {
161 		while (start < end) {
162 			viking_mxcc_flush_page(start);
163 			start += PAGE_SIZE;
164 		}
165 	} else if (viking_flush) {
166 		while (start < end) {
167 			viking_flush_page(start);
168 			start += PAGE_SIZE;
169 		}
170 	} else {
171 		while (start < end) {
172 			__flush_page_to_ram(start);
173 			start += PAGE_SIZE;
174 		}
175 	}
176 }
177 
178 static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
179 {
180 	struct iommu_struct *iommu = dev->archdata.iommu;
181 	int ioptex;
182 	iopte_t *iopte, *iopte0;
183 	unsigned int busa, busa0;
184 	int i;
185 
186 	/* page color = pfn of page */
187 	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
188 	if (ioptex < 0)
189 		panic("iommu out");
190 	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
191 	iopte0 = &iommu->page_table[ioptex];
192 
193 	busa = busa0;
194 	iopte = iopte0;
195 	for (i = 0; i < npages; i++) {
196 		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
197 		iommu_invalidate_page(iommu->regs, busa);
198 		busa += PAGE_SIZE;
199 		iopte++;
200 		page++;
201 	}
202 
203 	iommu_flush_iotlb(iopte0, npages);
204 
205 	return busa0;
206 }
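/*
 * A rough sketch (not in the original file) of the forward and reverse
 * address translations used by iommu_get_one() above and
 * sbus_iommu_unmap_page() below.  Assuming 4 KB pages, ioptex 16 corresponds
 * to bus address IOMMU_START + 16 * 4096 = 0xF0010000, and back again.
 */
#if 0
static inline u32 ioptex_to_busa(struct iommu_struct *iommu, int ioptex)
{
	return iommu->start + (ioptex << PAGE_SHIFT);
}

static inline int busa_to_ioptex(struct iommu_struct *iommu, u32 busa)
{
	return ((busa & PAGE_MASK) - iommu->start) >> PAGE_SHIFT;
}
#endif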
207 
208 static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
209 		unsigned long offset, size_t len)
210 {
211 	void *vaddr = page_address(page) + offset;
212 	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
213 	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
214 
215 	/* XXX So what is maxphys for us and how do drivers know it? */
216 	if (!len || len > 256 * 1024)
217 		return DMA_MAPPING_ERROR;
218 	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
219 }
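/*
 * The rounding above covers every page the buffer touches.  Assuming 4 KB
 * pages, a buffer at page offset 0x800 with len 0x1400 gives
 * npages = (0x800 + 0x1400 + 0xfff) >> 12 = 2: the transfer crosses one page
 * boundary even though it is shorter than two pages.
 */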
220 
221 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
222 		struct page *page, unsigned long offset, size_t len,
223 		enum dma_data_direction dir, unsigned long attrs)
224 {
225 	flush_page_for_dma(0);
226 	return __sbus_iommu_map_page(dev, page, offset, len);
227 }
228 
229 static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
230 		struct page *page, unsigned long offset, size_t len,
231 		enum dma_data_direction dir, unsigned long attrs)
232 {
233 	void *vaddr = page_address(page) + offset;
234 	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
235 
236 	while (p < (unsigned long)vaddr + len) {
237 		flush_page_for_dma(p);
238 		p += PAGE_SIZE;
239 	}
240 
241 	return __sbus_iommu_map_page(dev, page, offset, len);
242 }
243 
244 static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
245 		int nents, enum dma_data_direction dir, unsigned long attrs)
246 {
247 	struct scatterlist *sg;
248 	int i, n;
249 
250 	flush_page_for_dma(0);
251 
252 	for_each_sg(sgl, sg, nents, i) {
253 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
254 		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
255 		sg->dma_length = sg->length;
256 	}
257 
258 	return nents;
259 }
260 
261 static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
262 		int nents, enum dma_data_direction dir, unsigned long attrs)
263 {
264 	unsigned long page, oldpage = 0;
265 	struct scatterlist *sg;
266 	int i, j, n;
267 
268 	for_each_sg(sgl, sg, nents, j) {
269 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
270 
271 		/*
272 		 * We expect unmapped highmem pages not to be in the cache.
273 		 * XXX Is this a good assumption?
274 		 * XXX What if someone else unmaps it here and races us?
275 		 */
276 		if (!PageHighMem(sg_page(sg))) {
277 			page = (unsigned long)page_address(sg_page(sg));
278 			for (i = 0; i < n; i++) {
279 				if (page != oldpage) {	/* Already flushed? */
280 					flush_page_for_dma(page);
281 					oldpage = page;
282 				}
283 				page += PAGE_SIZE;
284 			}
285 		}
286 
287 		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
288 		sg->dma_length = sg->length;
289 	}
290 
291 	return nents;
292 }
293 
294 static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
295 		size_t len, enum dma_data_direction dir, unsigned long attrs)
296 {
297 	struct iommu_struct *iommu = dev->archdata.iommu;
298 	unsigned int busa = dma_addr & PAGE_MASK;
299 	unsigned long off = dma_addr & ~PAGE_MASK;
300 	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
301 	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
302 	unsigned int i;
303 
304 	BUG_ON(busa < iommu->start);
305 	for (i = 0; i < npages; i++) {
306 		iopte_val(iommu->page_table[ioptex + i]) = 0;
307 		iommu_invalidate_page(iommu->regs, busa);
308 		busa += PAGE_SIZE;
309 	}
310 	bit_map_clear(&iommu->usemap, ioptex, npages);
311 }
312 
313 static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
314 		int nents, enum dma_data_direction dir, unsigned long attrs)
315 {
316 	struct scatterlist *sg;
317 	int i;
318 
319 	for_each_sg(sgl, sg, nents, i) {
320 		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
321 				attrs);
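		/* Presumably a poison value ("!!!!") to make use-after-unmap easier to spot. */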
322 		sg->dma_address = 0x21212121;
323 	}
324 }
325 
326 #ifdef CONFIG_SBUS
327 static void *sbus_iommu_alloc(struct device *dev, size_t len,
328 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
329 {
330 	struct iommu_struct *iommu = dev->archdata.iommu;
331 	unsigned long va, addr, page, end, ret;
332 	iopte_t *iopte = iommu->page_table;
333 	iopte_t *first;
334 	int ioptex;
335 
336 	/* XXX So what is maxphys for us and how do drivers know it? */
337 	if (!len || len > 256 * 1024)
338 		return NULL;
339 
340 	len = PAGE_ALIGN(len);
341 	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
342 	if (va == 0)
343 		return NULL;
344 
345 	addr = ret = sparc_dma_alloc_resource(dev, len);
346 	if (!addr)
347 		goto out_free_pages;
348 
349 	BUG_ON((va & ~PAGE_MASK) != 0);
350 	BUG_ON((addr & ~PAGE_MASK) != 0);
351 	BUG_ON((len & ~PAGE_MASK) != 0);
352 
353 	/* page color = physical address */
354 	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
355 		addr >> PAGE_SHIFT);
356 	if (ioptex < 0)
357 		panic("iommu out");
358 
359 	iopte += ioptex;
360 	first = iopte;
361 	end = addr + len;
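	/*
	 * For each page: flush it out of the cpu cache, map it into the
	 * kernel's DVMA virtual area with dvma_prot (non-cacheable where
	 * needed), and install an iopte so the device sees it as well.
	 */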
362 	while (addr < end) {
363 		page = va;
364 		{
365 			pgd_t *pgdp;
366 			pmd_t *pmdp;
367 			pte_t *ptep;
368 
369 			if (viking_mxcc_present)
370 				viking_mxcc_flush_page(page);
371 			else if (viking_flush)
372 				viking_flush_page(page);
373 			else
374 				__flush_page_to_ram(page);
375 
376 			pgdp = pgd_offset(&init_mm, addr);
377 			pmdp = pmd_offset(pgdp, addr);
378 			ptep = pte_offset_map(pmdp, addr);
379 
380 			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
381 		}
382 		iopte_val(*iopte++) =
383 		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
384 		addr += PAGE_SIZE;
385 		va += PAGE_SIZE;
386 	}
387 	/* P3: why do we need this?
388 	 *
389 	 * DAVEM: Because there are several aspects, none of which
390 	 *        are handled by a single interface.  Some cpus are
391 	 *        not I/O DMA coherent at all, and some have
392 	 *        virtually indexed caches.  The driver DMA flushing
393 	 *        methods handle the former case, but here, where we
394 	 *        modify the IOMMU page table and use non-cacheable
395 	 *        cpu mappings of pages that may still sit in the cpu
396 	 *        caches, we have to handle the latter case as well.
397 	 */
398 	flush_cache_all();
399 	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
400 	flush_tlb_all();
401 	iommu_invalidate(iommu->regs);
402 
403 	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
404 	return (void *)ret;
405 
406 out_free_pages:
407 	free_pages(va, get_order(len));
408 	return NULL;
409 }
410 
411 static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
412 			       dma_addr_t busa, unsigned long attrs)
413 {
414 	struct iommu_struct *iommu = dev->archdata.iommu;
415 	iopte_t *iopte = iommu->page_table;
416 	struct page *page = virt_to_page(cpu_addr);
417 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
418 	unsigned long end;
419 
420 	if (!sparc_dma_free_resource(cpu_addr, len))
421 		return;
422 
423 	BUG_ON((busa & ~PAGE_MASK) != 0);
424 	BUG_ON((len & ~PAGE_MASK) != 0);
425 
426 	iopte += ioptex;
427 	end = busa + len;
428 	while (busa < end) {
429 		iopte_val(*iopte++) = 0;
430 		busa += PAGE_SIZE;
431 	}
432 	flush_tlb_all();
433 	iommu_invalidate(iommu->regs);
434 	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
435 
436 	__free_pages(page, get_order(len));
437 }
438 #endif
439 
440 static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
441 #ifdef CONFIG_SBUS
442 	.alloc			= sbus_iommu_alloc,
443 	.free			= sbus_iommu_free,
444 #endif
445 	.map_page		= sbus_iommu_map_page_gflush,
446 	.unmap_page		= sbus_iommu_unmap_page,
447 	.map_sg			= sbus_iommu_map_sg_gflush,
448 	.unmap_sg		= sbus_iommu_unmap_sg,
449 };
450 
451 static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
452 #ifdef CONFIG_SBUS
453 	.alloc			= sbus_iommu_alloc,
454 	.free			= sbus_iommu_free,
455 #endif
456 	.map_page		= sbus_iommu_map_page_pflush,
457 	.unmap_page		= sbus_iommu_unmap_page,
458 	.map_sg			= sbus_iommu_map_sg_pflush,
459 	.unmap_sg		= sbus_iommu_unmap_sg,
460 };
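/*
 * A rough usage sketch (not in the original file): drivers never call the
 * functions above directly; they go through the generic DMA API, and the
 * dma_map_ops installed by ld_mmu_iommu() below dispatch here.  "mydev" and
 * "buf" are made-up names for illustration.
 */
#if 0
static void example_streaming_dma(struct device *mydev, struct page *buf,
				  size_t len)
{
	dma_addr_t busa;

	busa = dma_map_page(mydev, buf, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(mydev, busa))
		return;
	/* ... hand busa to the device and run the transfer ... */
	dma_unmap_page(mydev, busa, len, DMA_TO_DEVICE);
}
#endif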
461 
462 void __init ld_mmu_iommu(void)
463 {
464 	if (flush_page_for_dma_global) {
465 		/* flush_page_for_dma flushes everything, no matter which page it is */
466 		dma_ops = &sbus_iommu_dma_gflush_ops;
467 	} else {
468 		dma_ops = &sbus_iommu_dma_pflush_ops;
469 	}
470 
471 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
472 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
473 		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
474 	} else {
475 		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
476 		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
477 	}
478 }
479