/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

/* Define CELL_IOMMU_REAL_UNMAP to actually unmap unused pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been implemented.
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions
 */
#define CELL_IOMMU_STRICT_PROTECTION


#define NR_IOMMUS			2

/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul


/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */


/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul

struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};

/* Static array of iommus, one per node
 *   each contains a list of windows, keyed from dma_window property
 *   - on bus setup, look for a matching window, or create one
 *   - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

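/* Flush the IOC's IOPT cache for the @n_ptes IOPTEs starting at @pte.
 * The NE field of IOC_IOPT_CacheInvd holds only 11 bits, so the
 * invalidation is issued in batches of up to 2048 entries, busy-waiting
 * for the Busy bit to clear after each one.
 */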
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	u64 __iomem *reg;
	u64 val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}

static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to set up the
	 * protection bit. "prot" is set up to be 3 fields of 4 bits appended
	 * together for each of the 3 supported direction values. It is then
	 * shifted left so that the fields matching the desired direction
	 * land on the appropriate bits, and other bits are masked out.
	 */
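	/* For example, with CBE_IOPTE_PP_W at bit 63 and CBE_IOPTE_PP_R
	 * at bit 62, prot = 0xc48 decodes as:
	 *   DMA_BIDIRECTIONAL (0): 0xc48 << 52 puts bits 11,10 at 63,62
	 *                          -> read + write
	 *   DMA_TO_DEVICE     (1): 0xc48 << 56 puts bit 6 at 62
	 *                          -> read only
	 *   DMA_FROM_DEVICE   (2): 0xc48 << 60 puts bit 3 at 63
	 *                          -> write only
	 */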
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) &
		 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
		CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#else
	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
	if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
		base_pte &= ~CBE_IOPTE_SO_RW;

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	/* advance uaddr by the IOMMU page *size*, not the shift */
	for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
	return 0;
}

static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		__pa(window->iommu->pad_page) |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}

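/* I/O exception interrupt handler: decode and log the fault recorded in
 * IOC_IO_ExcpStat (segment/page fault type, read/write, IOID and faulting
 * page address), then clear the valid bit so the IOC can latch the next
 * exception.
 */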
static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat, spf;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	spf = stat & IOC_IO_ExcpStat_SPF_Mask;

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR "  page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}

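/* Find the physical address of the IOC register block for node @nid,
 * either from a new style "ioc" node or, failing that, from the
 * "ioc-translation" property of the cpu node with a matching "node-id".
 */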
static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}

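/* Allocate and clear a segment table large enough to cover both the
 * dynamic and fixed windows. Each IOSTE maps one 256MB segment
 * (1 << IO_SEGMENT_SHIFT bytes) of DMA space.
 */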
static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
				unsigned long dbase, unsigned long dsize,
				unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
			__func__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}

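/* Allocate a page table for the [base, base + size) window and point the
 * corresponding IOSTEs at it, skipping segments that fall inside the
 * [gap_base, gap_base + gap_size) hole (used by the fixed mapping to
 * avoid overlapping the dynamic window).
 */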
static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments  = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* PTEs for each segment must start on a 4K boundary */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
			iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
			__func__, iommu->nid, iommu->stab, ptab,
			n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K;  break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M;  break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}

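/* Map the IOC registers, hook up the exception interrupt, load the
 * segment table origin, and finally set the TE bit to turn translation
 * on.
 */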
static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __func__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* set up interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
			reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT_4K);
	cell_iommu_enable_hardware(iommu);
}

#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif

static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}

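/* Create an iommu_window covering [offset, offset + size) of DMA space
 * and initialise its iommu_table. For a window starting at 0, the first
 * IOMMU page is reserved and mapped to a local scratch page for the
 * spider workaround described below.
 */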
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
	window->table.it_offset =
		(offset >> window->table.it_page_shift) + pte_offset;
	window->table.it_size = size >> window->table.it_page_shift;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;

/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
static int iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(dev_to_node(dev));
	if (iommu == NULL || list_empty(&iommu->windows)) {
		dev_err(dev, "iommu: missing iommu for %s (node %d)\n",
		       of_node_full_name(dev->of_node), dev_to_node(dev));
		return NULL;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	return &window->table;
}

/* A coherent allocation implies strong ordering */

static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak)
		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
					    size, dma_handle,
					    device_to_mask(dev), flag,
					    dev_to_node(dev));
	else
		return dma_direct_ops.alloc(dev, size, dma_handle, flag,
					    attrs);
}

static void dma_fixed_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak)
		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
				    dma_handle);
	else
		dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
}

static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_page(dev, page, offset, size,
					       direction, attrs);
	else
		return iommu_map_page(dev, cell_get_iommu_table(dev), page,
				      offset, size, device_to_mask(dev),
				      direction, attrs);
}

static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
					  attrs);
	else
		iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
				 direction, attrs);
}

static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
			   int nents, enum dma_data_direction direction,
			   struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
	else
		return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
				    device_to_mask(dev), direction, attrs);
}

static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
	else
		iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
			       attrs);
}

static int dma_fixed_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

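/* DMA ops used by 64-bit capable devices that have been switched to the
 * fixed mapping. Map/unmap calls whose ordering attribute matches the
 * ordering of the fixed window go through the direct ops (a simple
 * offset); mismatching calls fall back to dynamic mapping through the
 * normal IOMMU window.
 */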
struct dma_map_ops dma_iommu_fixed_ops = {
	.alloc          = dma_fixed_alloc_coherent,
	.free           = dma_fixed_free_coherent,
	.map_sg         = dma_fixed_map_sg,
	.unmap_sg       = dma_fixed_unmap_sg,
	.dma_supported  = dma_fixed_dma_supported,
	.set_dma_mask   = dma_set_mask_and_switch,
	.map_page       = dma_fixed_map_page,
	.unmap_page     = dma_fixed_unmap_page,
};

static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		set_iommu_table_base(dev, cell_get_iommu_table(dev));
	else if (get_pci_dma_ops() == &dma_direct_ops)
		set_dma_offset(dev, cell_dma_direct_offset);
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
					 unsigned long *base,
					 unsigned long *size)
{
	const __be32 *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else hard code! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}

static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 * However, there might be issues with getting the size right so let's
	 * ignore that for now. We might want to completely get rid of the
	 * multiple window support since the cell iommu supports per-page ioids
	 */

	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT_4K);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all buses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}
	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < memblock_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%lldMB)\n",
		       size >> 20, memblock_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}

/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we set up the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any offset required by firmware. The firmware offset is encoded
 *  in the "dma-ranges" property.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table, this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then set up the fixed mapping
 *  from 0 to 32GB.
 */

static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

	np = of_node_get(dev->of_node);
	while (1) {
		naddr = of_n_addr_cells(np);
		nsize = of_n_size_cells(np);
		np = of_get_next_parent(np);
		if (!np)
			break;

		ranges = of_get_property(np, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(np);
	range_size = naddr + nsize + pna;

	/* dma-ranges format:
	 * child addr	: naddr cells
	 * parent addr	: pna cells
	 * size		: nsize cells
	 */
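	/* As a hypothetical example, with naddr = 2, pna = 2 and nsize = 2,
	 * an entry of <0x0 0x0  0x20 0x0  0x1 0x0> would describe a 4GB
	 * range at child (device) address 0 and parent address
	 * 0x2000000000. The loop below picks the largest range whose
	 * translated CPU address is 0 and returns its child address.
	 */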
	for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
		cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
		size = of_read_number(ranges + i + naddr + pna, nsize);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0)
		dev_addr = of_read_number(ranges + best, naddr);
	else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return dev_addr;
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}

static void cell_dma_dev_setup_fixed(struct device *dev)
{
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	set_dma_offset(dev, addr);

	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
}

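/* Set the IOPTE for one 16MB page of the fixed mapping. Each segment's
 * PTEs start on a 4K boundary, so @ptab advances by 4K / 8 = 512 entries
 * per 256MB segment; @offset is the 16MB page number within the segment.
 */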
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		  addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}

static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		(cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);

	if (iommu_fixed_is_weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= CBE_IOPTE_SO_RW;
	}

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}

static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	np = of_find_node_with_property(NULL, "dma-ranges");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = memblock_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000ul)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in "
					 "real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			 dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT_4K);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					     fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	struct device_node *pciep;

	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	/* If we can find a pcie-endpoint in the device tree assume that
	 * we're on a triblade or a CAB so by default the fixed mapping
	 * should be set to be weakly ordered; but only if the boot
	 * option WASN'T set for strong ordering
	 */
	pciep = of_find_node_by_type(NULL, "pcie-endpoint");

	if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
		iommu_fixed_is_weak = 1;

	of_node_put(pciep);

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);

static u64 cell_dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops;

	if (!dev->dma_mask)
		return 0;

	if (!iommu_fixed_disabled &&
			cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
		return DMA_BIT_MASK(64);

	dma_ops = get_dma_ops(dev);
	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);

	return DMA_BIT_MASK(64);
}

static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If the IOMMU is disabled, or we have little enough RAM to not need
	 * to enable it, we set up a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);