xref: /openbmc/linux/arch/sparc/mm/iommu.c (revision cff11abeca78aa782378401ca2800bd2194aa14e)
// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do so
 * only when we have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
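/*
 * Sizing check: a 256MB DVMA window with 4K pages needs 64K IOPTEs;
 * at 4 bytes per iopte_t that is a 256KB table, i.e. 64 pages,
 * hence the order-6 allocation below.
 */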

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

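/*
 * IOPERM is the permission set used for streaming mappings.  MKIOPTE()
 * puts the page frame number into the IOPTE_PAGE field: with 4K pages,
 * pfn << 8 equals paddr >> 4, the same "physical address shifted right
 * by four" convention used for the IOMMU base register below.  The
 * IOPTE_WAZ (write-as-zero) bits are masked off to keep reserved bits
 * clear.
 */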
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
static const struct dma_map_ops sbus_iommu_dma_pflush_ops;

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/*
	 * Stupid alignment constraints give me a headache.
	 * We need a 256K, 512K, 1M or 2M area aligned to its size,
	 * and the current gfp will fortunately give it to us.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

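	/*
	 * The base register takes the page table's physical address
	 * shifted right by four bits.
	 */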
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
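	/* One color per page-sized slice of the virtual cache. */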
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;

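	/*
	 * Pick the DMA ops variant: if a single flush_page_for_dma(0)
	 * flushes everything (flush_page_for_dma_global), one flush per
	 * mapping call suffices; otherwise every page must be flushed
	 * individually before being handed to the device.
	 */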
	if (flush_page_for_dma_global)
		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
	else
		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(paddr);
	unsigned int busa, busa0;
	iopte_t *iopte, *iopte0;
	int ioptex, i;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	/*
	 * We expect unmapped highmem pages not to be in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	/* page color = pfn of page */
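	/*
	 * bit_map_string_get() returns npages consecutive IOPTE slots; the
	 * pfn acts as the color hint so that, on HyperSparc, the DVMA
	 * address ends up with the same cache color as the physical page.
	 */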
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
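	/* Install one valid IOPTE per page and drop any stale IOTLB entry. */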
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);
	return busa0 + off;
}

static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
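		/* Poison the stale handle so accidental reuse is easier to spot. */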
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
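	/*
	 * For each page: flush it out of the CPU caches, remap it at the
	 * DVMA virtual address with dvma_prot (uncached where required),
	 * and install a matching IOPTE with the ioperm_noc flags.
	 */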
	while (addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			p4d_t *p4dp;
			pud_t *pudp;
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pgdp = pgd_offset(&init_mm, addr);
			p4dp = p4d_offset(pgdp, addr);
			pudp = pud_offset(p4dp, addr);
			pmdp = pmd_offset(pudp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
			       dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

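/*
 * Select consistent-mapping attributes: Viking/MXCC and HyperSparc keep
 * DVMA-consistent memory cacheable; all other configurations fall back
 * to uncached mappings (no SRMMU_CACHE / IOPTE_CACHE).
 */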
void __init ld_mmu_iommu(void)
{
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}