xref: /openbmc/linux/arch/powerpc/kernel/iommu.c (revision 64c70b1c)
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

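/*
 * Return the number of IOMMU pages spanned by a buffer that starts at
 * vaddr and is slen bytes long, accounting for the offset of vaddr
 * within its first IOMMU page.
 */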
static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}

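/* Parse the "protect4gb=" boot option ("on" by default). */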
static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

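/* Parse the "iommu=" boot option to enable or disable virtual merging. */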
static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

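/*
 * Find and reserve a run of npages free entries in the table's
 * allocation bitmap.  Small allocations (15 pages or less) are kept in
 * the lower part of the table and large ones in the upper part to
 * limit fragmentation; "mask" bounds the highest usable DMA page
 * number and align_order gives the required alignment of the result.
 * Returns the first entry index, or DMA_ERROR_CODE on failure.
 * Caller must hold tbl->it_lock.
 */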
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	/* Avoid an undefined shift by 64 when align_order is 0 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* On the first failure, rescan our half of the
			 * table; on the second, rescan the other half.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

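/*
 * Allocate npages of DMA space for the given page and program the
 * corresponding TCEs into the hardware table.  Returns the DMA address
 * of the mapping, or DMA_ERROR_CODE if the table is full.
 */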
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		       unsigned int npages, enum dma_data_direction direction,
		       unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

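/*
 * Release a previously allocated DMA range: clear the TCEs in the
 * hardware table and the matching bits in the allocation bitmap.
 * Caller must hold tbl->it_lock and flush the TCE TLB if needed.
 */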
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

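/* Locked wrapper around __iommu_free() that also flushes the TCE TLB. */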
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

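/*
 * Map a scatterlist of nelems entries into DMA space.  Adjacent
 * segments whose allocated DMA ranges happen to be contiguous are
 * merged into a single output segment unless virtual merging is
 * disabled.  Returns the number of mapped segments, or 0 on failure
 * (in which case all partial mappings are undone).
 */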
int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = iommu_num_pages(vaddr, slen);
		entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with the
			 *   previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %x\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}

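/*
 * Undo a mapping created by iommu_map_sg().  Walks the scatterlist
 * until a zero-length entry (the terminator written by iommu_map_sg)
 * is hit or nelems entries have been freed.
 */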
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sglist->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	unsigned long start_index, end_index;
	unsigned long entries_per_4g;
	unsigned long index;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	/*
	 * DMA cannot cross a 4 GB boundary.  Mark the last entry of each
	 * 4 GB chunk as reserved.
	 */
	if (protect4gb) {
		entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;

		/* Mark the last bit before a 4GB boundary as used */
		start_index = tbl->it_offset | (entries_per_4g - 1);
		start_index -= tbl->it_offset;

		end_index = tbl->it_size;

		for (index = start_index; index < end_index - 1; index += entries_per_4g)
			__set_bit(index, tbl->it_map);
	}

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

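/*
 * Tear down the iommu_table attached to a device node: warn about any
 * TCEs still live, then free the allocation bitmap and the table itself.
 */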
void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
				dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed
 * here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned; the dma_addr_t returned will point to the
 * same byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, 0);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}

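/* Undo a mapping created by iommu_map_single(). */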
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_ERR "iommu_alloc_coherent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

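/* Free a buffer and its mappings allocated by iommu_alloc_coherent(). */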
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
669