// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This could be sized dynamically, but we will do that only
 * once we have guidance about actual I/O pressure.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
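
/*
 * A 256MB window of 4K pages needs 64K ioptes; at 4 bytes each that
 * is 256KB, i.e. 4096 << IOMMU_ORDER bytes, so a single order-6 page
 * allocation holds the entire table.
 */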

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
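
/*
 * The IOMMU takes page addresses shifted right by 4 bits: with 4K
 * pages, (pfn << 8) == (pfn << PAGE_SHIFT) >> 4, which is what lands
 * in the IOPTE_PAGE field; the write-as-zero bits are cleared last.
 */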

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate the IOMMU page table.
	 *
	 * Stupid alignment constraints give me a headache: we need a
	 * 256K, 512K, 1M or 2M area aligned to its size, and the
	 * current gfp allocator fortunately gives it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();
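	/*
	 * Like the ioptes themselves, the base register takes the
	 * table's physical address shifted right by 4 bits.
	 */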
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

static u32 iommu_get_one(struct device *dev, phys_addr_t paddr, int npages)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	int ioptex;
	iopte_t *iopte, *iopte0;
	unsigned int busa, busa0;
	unsigned long pfn = __phys_to_pfn(paddr);
	int i;

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

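	/*
	 * Fill one iopte per page and shoot down the matching IOMMU TLB
	 * entry so the new translation is picked up.
	 */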
	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);

	return busa0;
}

static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
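	/*
	 * off is the byte offset within the first page; npages counts
	 * every page touched by [paddr, paddr + len).
	 */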
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	/*
	 * We expect unmapped highmem pages not to be in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	return iommu_get_one(dev, paddr, npages) + off;
}

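/*
 * On CPUs where flush_page_for_dma() flushes the whole cache regardless
 * of its argument, a single flush up front is enough.
 */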
static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

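	/*
	 * The iopte index falls straight out of the DVMA address; clear
	 * each entry and invalidate its IOMMU TLB slot before returning
	 * the range to the allocator.
	 */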
	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
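		/* Poison the stale handle to help catch use-after-unmap. */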
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

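	/*
	 * Reserve a stretch of the kernel's DVMA virtual area; the
	 * backing pages are remapped into it below with dvma_prot,
	 * which may be an uncached protection depending on the CPU.
	 */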
	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
			       dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
	if (flush_page_for_dma_global) {
		/* flush_page_for_dma flushes everything, regardless of which page it is given */
		dma_ops = &sbus_iommu_dma_gflush_ops;
	} else {
		dma_ops = &sbus_iommu_dma_pflush_ops;
	}

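	/*
	 * Viking/MXCC and HyperSparc can keep consistent mappings
	 * cacheable; everyone else gets them uncached.
	 */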
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
467