xref: /openbmc/linux/arch/powerpc/kernel/iommu.c (revision 2c684d89)
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>

#define DBG(...)

static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("iommu=", setup_iommu);

static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4-way SMT we want interrupts on the primary threads,
 * and without the hash all primary threads would map to the same one of
 * the 4 pools.
 */
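/*
 * Example: with 4-way SMT the primary threads are CPUs 0, 4, 8, ... and
 * "cpu & (nr_pools - 1)" alone would land them all in pool 0; hashing the
 * CPU number first spreads them across the pools.
 */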
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);

#ifdef CONFIG_FAIL_IOMMU

static DECLARE_FAULT_ATTR(fail_iommu);

static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);

static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}

static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);

static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}

static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
		   fail_iommu_store);

static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
		device_remove_file(dev, &dev_attr_fail_iommu);
	}

	return 0;
}

static struct notifier_block fail_iommu_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after the PCI and VIO subsystems have initialised but
 * before devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif

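/*
 * Allocate a run of @npages contiguous entries in @tbl's bitmap.
 *
 * The table is split into several small pools (selected by the per-CPU
 * hash above) plus one large pool for big allocations.  A failed scan
 * first retries the chosen pool from its start, then walks the other
 * pools before giving up and returning DMA_ERROR_CODE.
 */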
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	/* low align_order bits set; written this way to avoid an undefined
	 * 64-bit shift when align_order is 0 */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end
	 * of the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << tbl->it_page_shift);
	else
		/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);

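	/* iommu_area_alloc() will not let a single allocation cross a
	 * multiple of boundary_size, i.e. the device's DMA segment boundary. */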
	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     boundary_size >> tbl->it_page_shift, align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}

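/*
 * Allocate table entries for @npages pages starting at kernel address
 * @page and program the hardware TCEs.  Returns the resulting bus
 * address, or DMA_ERROR_CODE if no space was found or the TCE build
 * failed.
 */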
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

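/* Sanity check that a DMA address/length pair lies within the table
 * before its entries are freed. */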
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}

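/* Map a table entry back to the pool (small or large) that owns it. */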
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	struct iommu_pool *p;
	unsigned long largepool_start = tbl->large_pool.start;

	/* The large pool is the last pool at the top of the table */
	if (entry >= largepool_start) {
		p = &tbl->large_pool;
	} else {
		unsigned int pool_nr = entry / tbl->poolsize;

		BUG_ON(pool_nr > tbl->nr_pools);
		p = &tbl->pools[pool_nr];
	}

	return p;
}

static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

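/*
 * Map a scatterlist.  Entries whose allocated DMA ranges come out
 * contiguous are merged into a single segment, unless "iommu=novmerge"
 * was given or the merged length would exceed the device's maximum
 * segment size.
 */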
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
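		/* When a naturally aligned buffer of at least a kernel page
		 * is mapped with smaller IOMMU pages, request matching
		 * alignment so the DMA address is page aligned too. */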
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}


void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}

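/*
 * Initialise the table contents: normally every TCE is cleared, but a
 * kdump kernel keeps the first kernel's mappings (marking them used in
 * the bitmap) so that in-flight DMA does not target reused memory.
 */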
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In the case of firmware-assisted dump, the system goes through a
	 * clean reboot at the time of the crash, so it is safe to clear the
	 * TCE entries if firmware-assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This prevents buggy drivers that treat DMA address 0 as
	 * invalid from crashing the machine or even losing data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
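	/*
	 * Example: a 2GB DMA window with 4K IOMMU pages has it_size =
	 * 0x80000 entries; the bottom 3/4 are divided evenly among the
	 * small pools and the top 1/4 becomes the large pool.
	 */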

	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz;
	unsigned int order;

	if (!tbl)
		return;

	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

	/* calculate bitmap size in bytes */
	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));

	if (tbl) {
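		/* As in ppc_iommu_map_sg(): for sub-PAGE_SIZE IOMMU pages,
		 * keep a page-aligned buffer page aligned in DMA space. */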
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size,
					 IOMMU_PAGE_SIZE(tbl));
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> tbl->it_page_shift;
	io_order = get_iommu_order(size, tbl);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> tbl->it_page_shift, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}

unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return TCE_PCI_READ | TCE_PCI_WRITE;
	case DMA_FROM_DEVICE:
		return TCE_PCI_WRITE;
	case DMA_TO_DEVICE:
		return TCE_PCI_READ;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);

#ifdef CONFIG_IOMMU_API
/*
 * SPAPR TCE API
 */
static void group_release(void *iommu_data)
{
	struct iommu_table_group *table_group = iommu_data;

	table_group->group = NULL;
}

void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	iommu_group_set_name(grp, name);
	kfree(name);
}

enum dma_data_direction iommu_tce_direction(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;
	else if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;
	else if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;
	else
		return DMA_NONE;
}
EXPORT_SYMBOL_GPL(iommu_tce_direction);

void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
{
	/* tbl->it_ops->clear() does not support any value but 0 */
	if (tce_value)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);

int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
{
	if (tce & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);

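/*
 * Exchange the TCE at @entry for a new mapping.  The previous host
 * physical address and direction are returned through @hpa and
 * @direction; if the device could have written to the old page, it is
 * marked dirty.
 */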
long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	/* if (unlikely(ret))
		pr_err("iommu_tce: %s failed on ioba=%lx hpa=%lx ret=%ld\n",
			__func__, entry << tbl->it_page_shift, *hpa, ret); */

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);

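/*
 * Hand the table over to an external user such as VFIO: the whole bitmap
 * is marked allocated so the kernel DMA API will not hand out entries
 * while the table is owned elsewhere.
 */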
int iommu_take_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
	int ret = 0;

	/*
	 * VFIO does not control TCE entries allocation and the guest
	 * can write new TCEs on top of existing ones so iommu_tce_build()
	 * must be able to release old pages. This functionality
	 * requires exchange() callback defined so if it is not
	 * implemented, we disallow taking ownership over the table.
	 */
	if (!tbl->it_ops->exchange)
		return -EINVAL;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
		pr_err("iommu_tce: it_map is not empty\n");
		ret = -EBUSY;
		/* Restore bit#0 set by iommu_init_table() */
		if (tbl->it_offset == 0)
			set_bit(0, tbl->it_map);
	} else {
		memset(tbl->it_map, 0xff, sz);
	}

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);

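/* Return the table to kernel control: clear the bitmap and re-reserve
 * entry 0 if the table starts at offset 0. */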
void iommu_release_ownership(struct iommu_table *tbl)
{
	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);
	for (i = 0; i < tbl->nr_pools; i++)
		spin_lock(&tbl->pools[i].lock);

	memset(tbl->it_map, 0, sz);

	/* Restore bit#0 set by iommu_init_table() */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	for (i = 0; i < tbl->nr_pools; i++)
		spin_unlock(&tbl->pools[i].lock);
	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);

int iommu_add_device(struct device *dev)
{
	struct iommu_table *tbl;
	struct iommu_table_group_link *tgl;

	/*
	 * The sysfs entries should be populated before
	 * binding the IOMMU group. If the sysfs entries
	 * aren't ready yet, simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (dev->iommu_group) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	tbl = get_iommu_table_base(dev);
	if (!tbl) {
		pr_debug("%s: Skipping device %s with no tbl\n",
			 __func__, dev_name(dev));
		return 0;
	}

	tgl = list_first_entry_or_null(&tbl->it_group_list,
			struct iommu_table_group_link, next);
	if (!tgl) {
		pr_debug("%s: Skipping device %s with no group\n",
			 __func__, dev_name(dev));
		return 0;
	}
	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev),
		 iommu_group_id(tgl->table_group->group));

	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
		       __func__, IOMMU_PAGE_SIZE(tbl),
		       PAGE_SIZE, dev_name(dev));
		return -EINVAL;
	}

	return iommu_group_add_device(tgl->table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);

void iommu_del_device(struct device *dev)
{
	/*
	 * Some devices might not have an IOMMU table and group,
	 * in which case there is nothing to detach from the
	 * associated IOMMU group.
	 */
	if (!dev->iommu_group) {
		pr_debug("iommu_tce: skipping device %s with no tbl\n",
			 dev_name(dev));
		return;
	}

	iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);

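/*
 * Keep IOMMU group membership in sync as PCI devices are added to or
 * removed from the bus.
 */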
static int tce_iommu_bus_notifier(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		return iommu_add_device(dev);
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->iommu_group)
			iommu_del_device(dev);
		return 0;
	default:
		return 0;
	}
}

static struct notifier_block tce_iommu_bus_nb = {
	.notifier_call = tce_iommu_bus_notifier,
};

int __init tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
	return 0;
}
#endif /* CONFIG_IOMMU_API */