xref: /openbmc/linux/arch/ia64/hp/common/sba_iommu.c (revision 82ced6fd)
1 /*
2 **  IA64 System Bus Adapter (SBA) I/O MMU manager
3 **
4 **	(c) Copyright 2002-2005 Alex Williamson
5 **	(c) Copyright 2002-2003 Grant Grundler
6 **	(c) Copyright 2002-2005 Hewlett-Packard Company
7 **
8 **	Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9 **	Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
10 **
11 **	This program is free software; you can redistribute it and/or modify
12 **	it under the terms of the GNU General Public License as published by
13 **      the Free Software Foundation; either version 2 of the License, or
14 **      (at your option) any later version.
15 **
16 **
17 ** This module initializes the IOC (I/O Controller) found on HP
18 ** McKinley machines and their successors.
19 **
20 */
21 
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
28 #include <linux/mm.h>
29 #include <linux/string.h>
30 #include <linux/pci.h>
31 #include <linux/proc_fs.h>
32 #include <linux/seq_file.h>
33 #include <linux/acpi.h>
34 #include <linux/efi.h>
35 #include <linux/nodemask.h>
36 #include <linux/bitops.h>         /* hweight64() */
37 #include <linux/crash_dump.h>
38 #include <linux/iommu-helper.h>
39 #include <linux/dma-mapping.h>
40 
41 #include <asm/delay.h>		/* ia64_get_itc() */
42 #include <asm/io.h>
43 #include <asm/page.h>		/* PAGE_OFFSET */
44 #include <asm/dma.h>
45 #include <asm/system.h>		/* wmb() */
46 
47 #include <asm/acpi-ext.h>
48 
49 extern int swiotlb_late_init_with_default_size (size_t size);
50 
51 #define PFX "IOC: "
52 
53 /*
54 ** Enables timing of pdir resource map searches.  Output appears in /proc.
55 ** Disabled by default to optimize performance.
56 */
57 #undef PDIR_SEARCH_TIMING
58 
59 /*
60 ** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
61 ** not defined, all DMA will be 32bit and go through the TLB.
62 ** There's potentially a conflict in the bio merge code with us
63 ** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
64 ** appears to give more performance than bio-level virtual merging, we'll
65 ** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
66 ** completely restrict DMA to the IOMMU.
67 */
68 #define ALLOW_IOV_BYPASS
69 
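/*
** Illustrative sketch only (not part of this driver): the bypass test
** used later in sba_map_page() reduces to checking that the buffer's
** physical address fits under the device's DMA mask.
*/
#if 0
static int sba_can_bypass_iommu(struct pci_dev *pdev, void *buf)
{
	unsigned long pci_addr = virt_to_phys(buf);

	/* every address bit above the mask must be zero */
	return (pci_addr & ~pdev->dma_mask) == 0;
}
#endif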
70 /*
71 ** This option specifically allows/disallows bypassing scatterlists with
72 ** multiple entries.  Coalescing these entries can allow better DMA streaming
73 ** and in some cases shows better performance than entirely bypassing the
74 ** IOMMU.  Performance increase on the order of 1-2% sequential output/input
75 ** using bonnie++ on a RAID0 MD device (sym2 & mpt).
76 */
77 #undef ALLOW_IOV_BYPASS_SG
78 
79 /*
80 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
81 ** a hard failure, i.e. an MCA.  Version 3.0 and later of the zx1 LBA should
82 ** disconnect on 4k boundaries and prevent such issues.  If the device is
83 ** particularly aggressive, this option will keep the entire pdir valid such
84 ** that prefetching will hit a valid address.  This could severely impact
85 ** error containment, and is therefore off by default.  The page that is
86 ** used for spill-over is poisoned, so that should help debugging somewhat.
87 */
88 #undef FULL_VALID_PDIR
89 
90 #define ENABLE_MARK_CLEAN
91 
92 /*
93 ** The number of debug flags is a clue - this code is fragile.  NOTE: since
94 ** tightening the use of res_lock the resource bitmap and actual pdir are no
95 ** longer guaranteed to stay in sync.  The sanity checking code isn't going to
96 ** like that.
97 */
98 #undef DEBUG_SBA_INIT
99 #undef DEBUG_SBA_RUN
100 #undef DEBUG_SBA_RUN_SG
101 #undef DEBUG_SBA_RESOURCE
102 #undef ASSERT_PDIR_SANITY
103 #undef DEBUG_LARGE_SG_ENTRIES
104 #undef DEBUG_BYPASS
105 
106 #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
107 #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
108 #endif
109 
110 #define SBA_INLINE	__inline__
111 /* #define SBA_INLINE */
112 
113 #ifdef DEBUG_SBA_INIT
114 #define DBG_INIT(x...)	printk(x)
115 #else
116 #define DBG_INIT(x...)
117 #endif
118 
119 #ifdef DEBUG_SBA_RUN
120 #define DBG_RUN(x...)	printk(x)
121 #else
122 #define DBG_RUN(x...)
123 #endif
124 
125 #ifdef DEBUG_SBA_RUN_SG
126 #define DBG_RUN_SG(x...)	printk(x)
127 #else
128 #define DBG_RUN_SG(x...)
129 #endif
130 
131 
132 #ifdef DEBUG_SBA_RESOURCE
133 #define DBG_RES(x...)	printk(x)
134 #else
135 #define DBG_RES(x...)
136 #endif
137 
138 #ifdef DEBUG_BYPASS
139 #define DBG_BYPASS(x...)	printk(x)
140 #else
141 #define DBG_BYPASS(x...)
142 #endif
143 
144 #ifdef ASSERT_PDIR_SANITY
145 #define ASSERT(expr) \
146         if(!(expr)) { \
147                 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
148                 panic(#expr); \
149         }
150 #else
151 #define ASSERT(expr)
152 #endif
153 
154 /*
155 ** The number of pdir entries to "free" before issuing
156 ** a read to the PCOM register to flush out the posted PCOM writes.
157 ** Interacts with allocation granularity (i.e. 4 or 8 entries
158 ** allocated and free'd/purged at a time might make this
159 ** less interesting).
160 */
161 #define DELAYED_RESOURCE_CNT	64
162 
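/*
** Editorial sketch (kept under #if 0, not compiled): the deferred-free
** idea described above batches freed (iova, size) pairs so that a single
** PCOM read flushes a whole batch of purges instead of one per unmap.
** It mirrors the batching done in sba_unmap_page() below; struct ioc,
** sba_mark_invalid() and sba_free_range() are defined later in this file.
*/
#if 0
static void sba_flush_deferred_frees(struct ioc *ioc)
{
	struct sba_dma_pair *d = &ioc->saved[ioc->saved_cnt - 1];
	int cnt = ioc->saved_cnt;

	while (cnt--) {
		sba_mark_invalid(ioc, d->iova, d->size);
		sba_free_range(ioc, d->iova, d->size);
		d--;
	}
	ioc->saved_cnt = 0;
	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* one read flushes all purges */
}
#endif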
163 #define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec
164 
165 #define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
166 #define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
167 #define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
168 #define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
169 #define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
170 
171 #define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */
172 
173 #define IOC_FUNC_ID	0x000
174 #define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
175 #define IOC_IBASE	0x300	/* IO TLB */
176 #define IOC_IMASK	0x308
177 #define IOC_PCOM	0x310
178 #define IOC_TCNFG	0x318
179 #define IOC_PDIR_BASE	0x320
180 
181 #define IOC_ROPE0_CFG	0x500
182 #define   IOC_ROPE_AO	  0x10	/* Allow "Relaxed Ordering" */
183 
184 
185 /* AGP GART driver looks for this */
186 #define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
187 
188 /*
189 ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
190 **
191 ** Some IOCs (sx1000) can run at the above page sizes, but are
192 ** really only supported using the IOC at a 4k page size.
193 **
194 ** iovp_size could only be greater than PAGE_SIZE if we are
195 ** confident the drivers really only touch the next physical
196 ** page iff that driver instance owns it.
197 */
198 static unsigned long iovp_size;
199 static unsigned long iovp_shift;
200 static unsigned long iovp_mask;
201 
202 struct ioc {
203 	void __iomem	*ioc_hpa;	/* I/O MMU base address */
204 	char		*res_map;	/* resource map, bit == pdir entry */
205 	u64		*pdir_base;	/* physical base address */
206 	unsigned long	ibase;		/* pdir IOV Space base */
207 	unsigned long	imask;		/* pdir IOV Space mask */
208 
209 	unsigned long	*res_hint;	/* next avail IOVP - circular search */
210 	unsigned long	dma_mask;
211 	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
212 					/* clearing pdir to prevent races with allocations. */
213 	unsigned int	res_bitshift;	/* from the RIGHT! */
214 	unsigned int	res_size;	/* size of resource map in bytes */
215 #ifdef CONFIG_NUMA
216 	unsigned int	node;		/* node where this IOC lives */
217 #endif
218 #if DELAYED_RESOURCE_CNT > 0
219 	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
220 					/* than res_lock for bigger systems. */
221 	int		saved_cnt;
222 	struct sba_dma_pair {
223 		dma_addr_t	iova;
224 		size_t		size;
225 	} saved[DELAYED_RESOURCE_CNT];
226 #endif
227 
228 #ifdef PDIR_SEARCH_TIMING
229 #define SBA_SEARCH_SAMPLE	0x100
230 	unsigned long avg_search[SBA_SEARCH_SAMPLE];
231 	unsigned long avg_idx;	/* current index into avg_search */
232 #endif
233 
234 	/* Stuff we don't need in performance path */
235 	struct ioc	*next;		/* list of IOC's in system */
236 	acpi_handle	handle;		/* for multiple IOC's */
237 	const char 	*name;
238 	unsigned int	func_id;
239 	unsigned int	rev;		/* HW revision of chip */
240 	u32		iov_size;
241 	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
242 	struct pci_dev	*sac_only_dev;
243 };
244 
245 static struct ioc *ioc_list;
246 static int reserve_sba_gart = 1;
247 
248 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
249 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
250 
251 #define sba_sg_address(sg)	sg_virt((sg))
252 
253 #ifdef FULL_VALID_PDIR
254 static u64 prefetch_spill_page;
255 #endif
256 
257 #ifdef CONFIG_PCI
258 # define GET_IOC(dev)	(((dev)->bus == &pci_bus_type)						\
259 			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
260 #else
261 # define GET_IOC(dev)	NULL
262 #endif
263 
264 /*
265 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
266 ** (or rather not merge) DMAs into manageable chunks.
267 ** On parisc, this is more of a software/tuning constraint
268 ** than a HW one. I/O MMU allocation algorithms can be
269 ** faster with smaller sizes (to some degree).
270 */
271 #define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)
272 
273 #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
274 
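/*
** Worked example (illustrative, not compiled): ROUNDUP() rounds x up to
** the next multiple of y, where y must be a power of two.
*/
#if 0
static void roundup_selftest(void)
{
	BUG_ON(ROUNDUP(0x1234, 0x1000) != 0x2000);	/* rounds up */
	BUG_ON(ROUNDUP(0x1000, 0x1000) != 0x1000);	/* already aligned */
	BUG_ON(ROUNDUP(1,      0x1000) != 0x1000);	/* minimum one unit */
}
#endif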
275 /************************************
276 ** SBA register read and write support
277 **
278 ** BE WARNED: register writes are posted.
279 **  (i.e. follow writes which must reach the HW with a read)
280 **
281 */
282 #define READ_REG(addr)       __raw_readq(addr)
283 #define WRITE_REG(val, addr) __raw_writeq(val, addr)
284 
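/*
** Illustrative pattern (not compiled): since writes are posted, code that
** must know a write reached the IOC follows it with a read from the same
** device, as the PCOM purge path below does.
*/
#if 0
static void sba_posted_write_flush_example(struct ioc *ioc, u32 iovp)
{
	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);	/* posted */
	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* read forces it to the HW */
}
#endif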
285 #ifdef DEBUG_SBA_INIT
286 
287 /**
288  * sba_dump_tlb - debugging only - print IOMMU operating parameters
289  * @hpa: base address of the IOMMU
290  *
291  * Print the size/location of the IO MMU PDIR.
292  */
293 static void
294 sba_dump_tlb(char *hpa)
295 {
296 	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
297 	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
298 	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
299 	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
300 	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
301 	DBG_INIT("\n");
302 }
303 #endif
304 
305 
306 #ifdef ASSERT_PDIR_SANITY
307 
308 /**
309  * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
310  * @ioc: IO MMU structure which owns the pdir we are interested in.
311  * @msg: text to print on the output line.
312  * @pide: pdir index.
313  *
314  * Print one entry of the IO MMU PDIR in human readable form.
315  */
316 static void
317 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
318 {
319 	/* start printing from lowest pde in rval */
320 	u64 *ptr = &ioc->pdir_base[pide  & ~(BITS_PER_LONG - 1)];
321 	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
322 	uint rcnt;
323 
324 	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
325 		 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
326 
327 	rcnt = 0;
328 	while (rcnt < BITS_PER_LONG) {
329 		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
330 		       (rcnt == (pide & (BITS_PER_LONG - 1)))
331 		       ? "    -->" : "       ",
332 		       rcnt, ptr, (unsigned long long) *ptr );
333 		rcnt++;
334 		ptr++;
335 	}
336 	printk(KERN_DEBUG "%s", msg);
337 }
338 
339 
340 /**
341  * sba_check_pdir - debugging only - consistency checker
342  * @ioc: IO MMU structure which owns the pdir we are interested in.
343  * @msg: text to print on the output line.
344  *
345  * Verify the resource map and pdir state is consistent
346  */
347 static int
348 sba_check_pdir(struct ioc *ioc, char *msg)
349 {
350 	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
351 	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
352 	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
353 	uint pide = 0;
354 
355 	while (rptr < rptr_end) {
356 		u64 rval;
357 		int rcnt; /* number of bits we might check */
358 
359 		rval = *rptr;
360 		rcnt = 64;
361 
362 		while (rcnt) {
363 			/* Get last byte and highest bit from that */
364 			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
365 			if ((rval & 0x1) ^ pde)
366 			{
367 				/*
368 				** BUMMER!  -- res_map != pdir --
369 				** Dump rval and matching pdir entries
370 				*/
371 				sba_dump_pdir_entry(ioc, msg, pide);
372 				return(1);
373 			}
374 			rcnt--;
375 			rval >>= 1;	/* try the next bit */
376 			pptr++;
377 			pide++;
378 		}
379 		rptr++;	/* look at next word of res_map */
380 	}
381 	/* It'd be nice if we always got here :^) */
382 	return 0;
383 }
384 
385 
386 /**
387  * sba_dump_sg - debugging only - print Scatter-Gather list
388  * @ioc: IO MMU structure which owns the pdir we are interested in.
389  * @startsg: head of the SG list
390  * @nents: number of entries in SG list
391  *
392  * print the SG list so we can verify it's correct by hand.
393  */
394 static void
395 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
396 {
397 	while (nents-- > 0) {
398 		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
399 		       startsg->dma_address, startsg->dma_length,
400 		       sba_sg_address(startsg));
401 		startsg = sg_next(startsg);
402 	}
403 }
404 
405 static void
406 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
407 {
408 	struct scatterlist *the_sg = startsg;
409 	int the_nents = nents;
410 
411 	while (the_nents-- > 0) {
412 		if (sba_sg_address(the_sg) == 0x0UL)
413 			sba_dump_sg(NULL, startsg, nents);
414 		the_sg = sg_next(the_sg);
415 	}
416 }
417 
418 #endif /* ASSERT_PDIR_SANITY */
419 
420 
421 
422 
423 /**************************************************************
424 *
425 *   I/O Pdir Resource Management
426 *
427 *   Bits set in the resource map are in use.
428 *   Each bit can represent a number of pages.
429 *   LSbs represent lower addresses (IOVA's).
430 *
431 ***************************************************************/
432 #define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */
433 
434 /* Convert from IOVP to IOVA and vice versa. */
435 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
436 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
437 
438 #define PDIR_ENTRY_SIZE	sizeof(u64)
439 
440 #define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)
441 
442 #define RESMAP_MASK(n)    ~(~0UL << (n))
443 #define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
444 
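/*
** Worked example (illustrative, not compiled; assumes a 4k iovp_size so
** iovp_shift == 12, and an ibase that doesn't overlap the iovp bits):
** pdir slot 5 with a byte offset of 0x234 gives iovp 0x5000 and, with
** ibase 0x40000000, the device-visible IOVA 0x40005234.
*/
#if 0
static void sba_iova_macro_example(struct ioc *ioc)
{
	dma_addr_t iovp = 5UL << iovp_shift;		/* pdir slot 5 */
	dma_addr_t iova = SBA_IOVA(ioc, iovp, 0x234);	/* device-visible */

	BUG_ON(SBA_IOVP(ioc, iova) != (iovp | 0x234));	/* strip ibase */
	BUG_ON(PDIR_INDEX(SBA_IOVP(ioc, iova)) != 5);	/* back to slot 5 */
	BUG_ON(RESMAP_MASK(4) != 0xfUL);		/* 4 low bits set */
}
#endif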
445 
446 /**
447  * For most cases the normal get_order is sufficient; however, it limits us
448  * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
449  * It only incurs about 1 clock cycle to use this one with the static variable
450  * and makes the code more intuitive.
451  */
452 static SBA_INLINE int
453 get_iovp_order (unsigned long size)
454 {
455 	long double d = size - 1;
456 	long order;
457 
458 	order = ia64_getf_exp(d);
459 	order = order - iovp_shift - 0xffff + 1;
460 	if (order < 0)
461 		order = 0;
462 	return order;
463 }
464 
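/*
** For reference only (not compiled): a portable equivalent of
** get_iovp_order() without the ia64 getf.exp trick, i.e.
** ceil(log2(number of IOV pages needed for size)).
*/
#if 0
static int get_iovp_order_generic(unsigned long size)
{
	unsigned long pages = (size + iovp_size - 1) >> iovp_shift;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}
#endif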
465 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
466 				 unsigned int bitshiftcnt)
467 {
468 	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
469 		+ bitshiftcnt;
470 }
471 
472 /**
473  * sba_search_bitmap - find free space in IO PDIR resource bitmap
474  * @ioc: IO MMU structure which owns the pdir we are interested in.
475  * @bits_wanted: number of entries we need.
476  * @use_hint: use res_hint to indicate where to start looking
477  *
478  * Find consecutive free bits in resource bitmap.
479  * Each bit represents one entry in the IO Pdir.
480  * Cool perf optimization: search for log2(size) bits at a time.
481  */
482 static SBA_INLINE unsigned long
483 sba_search_bitmap(struct ioc *ioc, struct device *dev,
484 		  unsigned long bits_wanted, int use_hint)
485 {
486 	unsigned long *res_ptr;
487 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
488 	unsigned long flags, pide = ~0UL, tpide;
489 	unsigned long boundary_size;
490 	unsigned long shift;
491 	int ret;
492 
493 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
494 	ASSERT(res_ptr < res_end);
495 
496 	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
497 	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
498 
499 	BUG_ON(ioc->ibase & ~iovp_mask);
500 	shift = ioc->ibase >> iovp_shift;
501 
502 	spin_lock_irqsave(&ioc->res_lock, flags);
503 
504 	/* Allow caller to force a search through the entire resource space */
505 	if (likely(use_hint)) {
506 		res_ptr = ioc->res_hint;
507 	} else {
508 		res_ptr = (ulong *)ioc->res_map;
509 		ioc->res_bitshift = 0;
510 	}
511 
512 	/*
513 	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
514 	 * if a TLB entry is purged while in use.  sba_mark_invalid()
515 	 * purges IOTLB entries in power-of-two sizes, so we also
516 	 * allocate IOVA space in power-of-two sizes.
517 	 */
518 	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
519 
520 	if (likely(bits_wanted == 1)) {
521 		unsigned int bitshiftcnt;
522 		for(; res_ptr < res_end ; res_ptr++) {
523 			if (likely(*res_ptr != ~0UL)) {
524 				bitshiftcnt = ffz(*res_ptr);
525 				*res_ptr |= (1UL << bitshiftcnt);
526 				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
527 				ioc->res_bitshift = bitshiftcnt + bits_wanted;
528 				goto found_it;
529 			}
530 		}
531 		goto not_found;
532 
533 	}
534 
535 	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
536 		/*
537 		** Search the resource bit map on well-aligned values.
538 		** "o" is the alignment.
539 		** We need the alignment to invalidate I/O TLB using
540 		** SBA HW features in the unmap path.
541 		*/
542 		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
543 		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
544 		unsigned long mask, base_mask;
545 
546 		base_mask = RESMAP_MASK(bits_wanted);
547 		mask = base_mask << bitshiftcnt;
548 
549 		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
550 		for(; res_ptr < res_end ; res_ptr++)
551 		{
552 			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
553 			ASSERT(0 != mask);
554 			for (; mask ; mask <<= o, bitshiftcnt += o) {
555 				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
556 				ret = iommu_is_span_boundary(tpide, bits_wanted,
557 							     shift,
558 							     boundary_size);
559 				if ((0 == ((*res_ptr) & mask)) && !ret) {
560 					*res_ptr |= mask;     /* mark resources busy! */
561 					pide = tpide;
562 					ioc->res_bitshift = bitshiftcnt + bits_wanted;
563 					goto found_it;
564 				}
565 			}
566 
567 			bitshiftcnt = 0;
568 			mask = base_mask;
569 
570 		}
571 
572 	} else {
573 		int qwords, bits, i;
574 		unsigned long *end;
575 
576 		qwords = bits_wanted >> 6; /* /64 */
577 		bits = bits_wanted - (qwords * BITS_PER_LONG);
578 
579 		end = res_end - qwords;
580 
581 		for (; res_ptr < end; res_ptr++) {
582 			tpide = ptr_to_pide(ioc, res_ptr, 0);
583 			ret = iommu_is_span_boundary(tpide, bits_wanted,
584 						     shift, boundary_size);
585 			if (ret)
586 				goto next_ptr;
587 			for (i = 0 ; i < qwords ; i++) {
588 				if (res_ptr[i] != 0)
589 					goto next_ptr;
590 			}
591 			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
592 				continue;
593 
594 			/* Found it, mark it */
595 			for (i = 0 ; i < qwords ; i++)
596 				res_ptr[i] = ~0UL;
597 			res_ptr[i] |= RESMAP_MASK(bits);
598 
599 			pide = tpide;
600 			res_ptr += qwords;
601 			ioc->res_bitshift = bits;
602 			goto found_it;
603 next_ptr:
604 			;
605 		}
606 	}
607 
608 not_found:
609 	prefetch(ioc->res_map);
610 	ioc->res_hint = (unsigned long *) ioc->res_map;
611 	ioc->res_bitshift = 0;
612 	spin_unlock_irqrestore(&ioc->res_lock, flags);
613 	return (pide);
614 
615 found_it:
616 	ioc->res_hint = res_ptr;
617 	spin_unlock_irqrestore(&ioc->res_lock, flags);
618 	return (pide);
619 }
620 
621 
622 /**
623  * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
624  * @ioc: IO MMU structure which owns the pdir we are interested in.
625  * @size: number of bytes to create a mapping for
626  *
627  * Given a size, find consecutive unmarked bits and then mark them in the
628  * resource bit map.
629  */
630 static int
631 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
632 {
633 	unsigned int pages_needed = size >> iovp_shift;
634 #ifdef PDIR_SEARCH_TIMING
635 	unsigned long itc_start;
636 #endif
637 	unsigned long pide;
638 
639 	ASSERT(pages_needed);
640 	ASSERT(0 == (size & ~iovp_mask));
641 
642 #ifdef PDIR_SEARCH_TIMING
643 	itc_start = ia64_get_itc();
644 #endif
645 	/*
646 	** "seek and ye shall find"...praying never hurts either...
647 	*/
648 	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
649 	if (unlikely(pide >= (ioc->res_size << 3))) {
650 		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
651 		if (unlikely(pide >= (ioc->res_size << 3))) {
652 #if DELAYED_RESOURCE_CNT > 0
653 			unsigned long flags;
654 
655 			/*
656 			** With delayed resource freeing, we can give this one more shot.  We're
657 			** getting close to being in trouble here, so do what we can to make this
658 			** one count.
659 			*/
660 			spin_lock_irqsave(&ioc->saved_lock, flags);
661 			if (ioc->saved_cnt > 0) {
662 				struct sba_dma_pair *d;
663 				int cnt = ioc->saved_cnt;
664 
665 				d = &(ioc->saved[ioc->saved_cnt - 1]);
666 
667 				spin_lock(&ioc->res_lock);
668 				while (cnt--) {
669 					sba_mark_invalid(ioc, d->iova, d->size);
670 					sba_free_range(ioc, d->iova, d->size);
671 					d--;
672 				}
673 				ioc->saved_cnt = 0;
674 				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
675 				spin_unlock(&ioc->res_lock);
676 			}
677 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
678 
679 			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
680 			if (unlikely(pide >= (ioc->res_size << 3)))
681 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
682 				      ioc->ioc_hpa);
683 #else
684 			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
685 			      ioc->ioc_hpa);
686 #endif
687 		}
688 	}
689 
690 #ifdef PDIR_SEARCH_TIMING
691 	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
692 	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
693 #endif
694 
695 	prefetchw(&(ioc->pdir_base[pide]));
696 
697 #ifdef ASSERT_PDIR_SANITY
698 	/* verify the first enable bit is clear */
699 	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
700 		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
701 	}
702 #endif
703 
704 	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
705 		__func__, size, pages_needed, pide,
706 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
707 		ioc->res_bitshift );
708 
709 	return (pide);
710 }
711 
712 
713 /**
714  * sba_free_range - unmark bits in IO PDIR resource bitmap
715  * @ioc: IO MMU structure which owns the pdir we are interested in.
716  * @iova: IO virtual address which was previously allocated.
717  * @size: number of bytes that were mapped at @iova
718  *
719  * clear bits in the ioc's resource map
720  */
721 static SBA_INLINE void
722 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
723 {
724 	unsigned long iovp = SBA_IOVP(ioc, iova);
725 	unsigned int pide = PDIR_INDEX(iovp);
726 	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
727 	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
728 	int bits_not_wanted = size >> iovp_shift;
729 	unsigned long m;
730 
731 	/* Round up to power-of-two size: see AR2305 note above */
732 	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
733 	for (; bits_not_wanted > 0 ; res_ptr++) {
734 
735 		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
736 
737 			/* these mappings start 64bit aligned */
738 			*res_ptr = 0UL;
739 			bits_not_wanted -= BITS_PER_LONG;
740 			pide += BITS_PER_LONG;
741 
742 		} else {
743 
744 			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
745 			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
746 			bits_not_wanted = 0;
747 
748 			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
749 			        bits_not_wanted, m, pide, res_ptr, *res_ptr);
750 
751 			ASSERT(m != 0);
752 			ASSERT(bits_not_wanted);
753 			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
754 			*res_ptr &= ~m;
755 		}
756 	}
757 }
758 
759 
760 /**************************************************************
761 *
762 *   "Dynamic DMA Mapping" support (aka "Coherent I/O")
763 *
764 ***************************************************************/
765 
766 /**
767  * sba_io_pdir_entry - fill in one IO PDIR entry
768  * @pdir_ptr:  pointer to IO PDIR entry
769  * @vba: Virtual CPU address of buffer to map
770  *
771  * SBA Mapping Routine
772  *
773  * Given a virtual address (vba, arg1) sba_io_pdir_entry()
774  * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
775  * Each IO Pdir entry consists of 8 bytes as shown below
776  * (LSB == bit 0):
777  *
778  *  63                    40                                 11    7        0
779  * +-+---------------------+----------------------------------+----+--------+
780  * |V|        U            |            PPN[39:12]            | U  |   FF   |
781  * +-+---------------------+----------------------------------+----+--------+
782  *
783  *  V  == Valid Bit
784  *  U  == Unused
785  * PPN == Physical Page Number
786  *
787  * The physical address fields are filled with the results of virt_to_phys()
788  * on the vba.
789  */
790 
791 #if 1
792 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
793 						      | 0x8000000000000000ULL)
794 #else
795 void SBA_INLINE
796 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
797 {
798 	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
799 }
800 #endif
801 
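/*
** Worked example (illustrative, not compiled): for the region-7
** identity-mapped address 0xe000000004321a10, sba_io_pdir_entry() strips
** the region bits and the page offset and sets the valid bit, leaving
** PPN[39:12] == 0x4321.
*/
#if 0
static void sba_pdir_entry_example(void)
{
	u64 pdirent;

	sba_io_pdir_entry(&pdirent, 0xe000000004321a10UL);
	BUG_ON(pdirent != 0x8000000004321000UL);	/* V=1, PPN=0x4321 */
}
#endif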
802 #ifdef ENABLE_MARK_CLEAN
803 /**
804  * Since DMA is i-cache coherent, any (complete) pages that were written via
805  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
806  * flush them when they get mapped into an executable vm-area.
807  */
808 static void
809 mark_clean (void *addr, size_t size)
810 {
811 	unsigned long pg_addr, end;
812 
813 	pg_addr = PAGE_ALIGN((unsigned long) addr);
814 	end = (unsigned long) addr + size;
815 	while (pg_addr + PAGE_SIZE <= end) {
816 		struct page *page = virt_to_page((void *)pg_addr);
817 		set_bit(PG_arch_1, &page->flags);
818 		pg_addr += PAGE_SIZE;
819 	}
820 }
821 #endif
822 
823 /**
824  * sba_mark_invalid - invalidate one or more IO PDIR entries
825  * @ioc: IO MMU structure which owns the pdir we are interested in.
826  * @iova:  IO Virtual Address mapped earlier
827  * @byte_cnt:  number of bytes this mapping covers.
828  *
829  * Marking the IO PDIR entry(ies) as Invalid and invalidate
830  * corresponding IO TLB entry. The PCOM (Purge Command Register)
831  * is to purge stale entries in the IO TLB when unmapping entries.
832  *
833  * The PCOM register supports purging of multiple pages, with a minium
834  * of 1 page and a maximum of 2GB. Hardware requires the address be
835  * aligned to the size of the range being purged. The size of the range
836  * must be a power of 2. The "Cool perf optimization" in the
837  * allocation routine helps keep that true.
838  */
839 static SBA_INLINE void
840 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
841 {
842 	u32 iovp = (u32) SBA_IOVP(ioc,iova);
843 
844 	int off = PDIR_INDEX(iovp);
845 
846 	/* Must be non-zero and rounded up */
847 	ASSERT(byte_cnt > 0);
848 	ASSERT(0 == (byte_cnt & ~iovp_mask));
849 
850 #ifdef ASSERT_PDIR_SANITY
851 	/* Assert first pdir entry is set */
852 	if (!(ioc->pdir_base[off] >> 60)) {
853 		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
854 	}
855 #endif
856 
857 	if (byte_cnt <= iovp_size)
858 	{
859 		ASSERT(off < ioc->pdir_size);
860 
861 		iovp |= iovp_shift;     /* set "size" field for PCOM */
862 
863 #ifndef FULL_VALID_PDIR
864 		/*
865 		** clear I/O PDIR entry "valid" bit
866 		** Do NOT clear the rest - save it for debugging.
867 		** We should only clear bits that have previously
868 		** been enabled.
869 		*/
870 		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
871 #else
872 		/*
873   		** If we want to maintain the PDIR as valid, put in
874 		** the spill page so devices prefetching won't
875 		** cause a hard fail.
876 		*/
877 		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
878 #endif
879 	} else {
880 		u32 t = get_iovp_order(byte_cnt) + iovp_shift;
881 
882 		iovp |= t;
883 		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */
884 
885 		do {
886 			/* verify this pdir entry is enabled */
887 			ASSERT(ioc->pdir_base[off]  >> 63);
888 #ifndef FULL_VALID_PDIR
889 			/* clear I/O Pdir entry "valid" bit first */
890 			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
891 #else
892 			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
893 #endif
894 			off++;
895 			byte_cnt -= iovp_size;
896 		} while (byte_cnt > 0);
897 	}
898 
899 	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
900 }
901 
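/*
** Illustrative note on the PCOM write above (not compiled): the value
** written is the size-aligned IOVA with log2(bytes being purged) encoded
** in the low bits.  With 4k pages and ibase 0x40000000, purging one page
** at pdir index 5 writes 0x4000500c; purging 32k there writes 0x4000500f.
*/
#if 0
static void sba_pcom_value_example(struct ioc *ioc)
{
	u32 iovp = 5 << iovp_shift;

	/* one page: size field == iovp_shift */
	WRITE_REG((iovp | iovp_shift) | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
	/* eight pages (32k with 4k pages): size field == iovp_shift + 3 */
	WRITE_REG((iovp | (iovp_shift + 3)) | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* flush the posted purges */
}
#endif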
902 /**
903  * sba_map_page - map one buffer page and return IOVA for DMA
904  * @dev: instance of PCI owned by the driver that's asking.
905  * @page:  page of the driver buffer to map (@poff is the offset within it).
906  * @size:  number of bytes to map in driver buffer.
907  * @dir:  R/W or both.
908  * @attrs: optional dma attributes
909  *
910  * See Documentation/PCI/PCI-DMA-mapping.txt
911  */
912 static dma_addr_t sba_map_page(struct device *dev, struct page *page,
913 			       unsigned long poff, size_t size,
914 			       enum dma_data_direction dir,
915 			       struct dma_attrs *attrs)
916 {
917 	struct ioc *ioc;
918 	void *addr = page_address(page) + poff;
919 	dma_addr_t iovp;
920 	dma_addr_t offset;
921 	u64 *pdir_start;
922 	int pide;
923 #ifdef ASSERT_PDIR_SANITY
924 	unsigned long flags;
925 #endif
926 #ifdef ALLOW_IOV_BYPASS
927 	unsigned long pci_addr = virt_to_phys(addr);
928 #endif
929 
930 #ifdef ALLOW_IOV_BYPASS
931 	ASSERT(to_pci_dev(dev)->dma_mask);
932 	/*
933  	** Check if the PCI device can DMA to ptr... if so, just return ptr
934  	*/
935 	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
936 		/*
937 		** Device is capable of DMA'ing to the buffer...
938 		** just return the PCI address of ptr
939  		*/
940 		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
941 			   "0x%lx/0x%lx\n",
942 		           to_pci_dev(dev)->dma_mask, pci_addr);
943 		return pci_addr;
944 	}
945 #endif
946 	ioc = GET_IOC(dev);
947 	ASSERT(ioc);
948 
949 	prefetch(ioc->res_hint);
950 
951 	ASSERT(size > 0);
952 	ASSERT(size <= DMA_CHUNK_SIZE);
953 
954 	/* save offset bits */
955 	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
956 
957 	/* round up to nearest iovp_size */
958 	size = (size + offset + ~iovp_mask) & iovp_mask;
959 
960 #ifdef ASSERT_PDIR_SANITY
961 	spin_lock_irqsave(&ioc->res_lock, flags);
962 	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
963 		panic("Sanity check failed");
964 	spin_unlock_irqrestore(&ioc->res_lock, flags);
965 #endif
966 
967 	pide = sba_alloc_range(ioc, dev, size);
968 
969 	iovp = (dma_addr_t) pide << iovp_shift;
970 
971 	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
972 
973 	pdir_start = &(ioc->pdir_base[pide]);
974 
975 	while (size > 0) {
976 		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
977 		sba_io_pdir_entry(pdir_start, (unsigned long) addr);
978 
979 		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);
980 
981 		addr += iovp_size;
982 		size -= iovp_size;
983 		pdir_start++;
984 	}
985 	/* force pdir update */
986 	wmb();
987 
988 	/* form complete address */
989 #ifdef ASSERT_PDIR_SANITY
990 	spin_lock_irqsave(&ioc->res_lock, flags);
991 	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
992 	spin_unlock_irqrestore(&ioc->res_lock, flags);
993 #endif
994 	return SBA_IOVA(ioc, iovp, offset);
995 }
996 
997 static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
998 				       size_t size, enum dma_data_direction dir,
999 				       struct dma_attrs *attrs)
1000 {
1001 	return sba_map_page(dev, virt_to_page(addr),
1002 			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
1003 }
1004 
1005 #ifdef ENABLE_MARK_CLEAN
1006 static SBA_INLINE void
1007 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1008 {
1009 	u32	iovp = (u32) SBA_IOVP(ioc,iova);
1010 	int	off = PDIR_INDEX(iovp);
1011 	void	*addr;
1012 
1013 	if (size <= iovp_size) {
1014 		addr = phys_to_virt(ioc->pdir_base[off] &
1015 		                    ~0xE000000000000FFFULL);
1016 		mark_clean(addr, size);
1017 	} else {
1018 		do {
1019 			addr = phys_to_virt(ioc->pdir_base[off] &
1020 			                    ~0xE000000000000FFFULL);
1021 			mark_clean(addr, min(size, iovp_size));
1022 			off++;
1023 			size -= iovp_size;
1024 		} while (size > 0);
1025 	}
1026 }
1027 #endif
1028 
1029 /**
1030  * sba_unmap_page - unmap one IOVA and free resources
1031  * @dev: instance of PCI owned by the driver that's asking.
1032  * @iova:  IOVA of driver buffer previously mapped.
1033  * @size:  number of bytes mapped in driver buffer.
1034  * @dir:  R/W or both.
1035  * @attrs: optional dma attributes
1036  *
1037  * See Documentation/PCI/PCI-DMA-mapping.txt
1038  */
1039 static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
1040 			   enum dma_data_direction dir, struct dma_attrs *attrs)
1041 {
1042 	struct ioc *ioc;
1043 #if DELAYED_RESOURCE_CNT > 0
1044 	struct sba_dma_pair *d;
1045 #endif
1046 	unsigned long flags;
1047 	dma_addr_t offset;
1048 
1049 	ioc = GET_IOC(dev);
1050 	ASSERT(ioc);
1051 
1052 #ifdef ALLOW_IOV_BYPASS
1053 	if (likely((iova & ioc->imask) != ioc->ibase)) {
1054 		/*
1055 		** Address does not fall w/in IOVA, must be bypassing
1056 		*/
1057 		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
1058 			   iova);
1059 
1060 #ifdef ENABLE_MARK_CLEAN
1061 		if (dir == DMA_FROM_DEVICE) {
1062 			mark_clean(phys_to_virt(iova), size);
1063 		}
1064 #endif
1065 		return;
1066 	}
1067 #endif
1068 	offset = iova & ~iovp_mask;
1069 
1070 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
1071 
1072 	iova ^= offset;        /* clear offset bits */
1073 	size += offset;
1074 	size = ROUNDUP(size, iovp_size);
1075 
1076 #ifdef ENABLE_MARK_CLEAN
1077 	if (dir == DMA_FROM_DEVICE)
1078 		sba_mark_clean(ioc, iova, size);
1079 #endif
1080 
1081 #if DELAYED_RESOURCE_CNT > 0
1082 	spin_lock_irqsave(&ioc->saved_lock, flags);
1083 	d = &(ioc->saved[ioc->saved_cnt]);
1084 	d->iova = iova;
1085 	d->size = size;
1086 	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1087 		int cnt = ioc->saved_cnt;
1088 		spin_lock(&ioc->res_lock);
1089 		while (cnt--) {
1090 			sba_mark_invalid(ioc, d->iova, d->size);
1091 			sba_free_range(ioc, d->iova, d->size);
1092 			d--;
1093 		}
1094 		ioc->saved_cnt = 0;
1095 		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
1096 		spin_unlock(&ioc->res_lock);
1097 	}
1098 	spin_unlock_irqrestore(&ioc->saved_lock, flags);
1099 #else /* DELAYED_RESOURCE_CNT == 0 */
1100 	spin_lock_irqsave(&ioc->res_lock, flags);
1101 	sba_mark_invalid(ioc, iova, size);
1102 	sba_free_range(ioc, iova, size);
1103 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
1104 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1105 #endif /* DELAYED_RESOURCE_CNT == 0 */
1106 }
1107 
1108 void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
1109 			    enum dma_data_direction dir, struct dma_attrs *attrs)
1110 {
1111 	sba_unmap_page(dev, iova, size, dir, attrs);
1112 }
1113 
1114 /**
1115  * sba_alloc_coherent - allocate/map shared mem for DMA
1116  * @dev: instance of PCI owned by the driver that's asking.
1117  * @size:  number of bytes to allocate.
1118  * @dma_handle:  IOVA of new buffer.
1119  *
1120  * See Documentation/PCI/PCI-DMA-mapping.txt
1121  */
1122 static void *
1123 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
1124 {
1125 	struct ioc *ioc;
1126 	void *addr;
1127 
1128 	ioc = GET_IOC(dev);
1129 	ASSERT(ioc);
1130 
1131 #ifdef CONFIG_NUMA
1132 	{
1133 		struct page *page;
1134 		page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
1135 		                        numa_node_id() : ioc->node, flags,
1136 		                        get_order(size));
1137 
1138 		if (unlikely(!page))
1139 			return NULL;
1140 
1141 		addr = page_address(page);
1142 	}
1143 #else
1144 	addr = (void *) __get_free_pages(flags, get_order(size));
1145 #endif
1146 	if (unlikely(!addr))
1147 		return NULL;
1148 
1149 	memset(addr, 0, size);
1150 	*dma_handle = virt_to_phys(addr);
1151 
1152 #ifdef ALLOW_IOV_BYPASS
1153 	ASSERT(dev->coherent_dma_mask);
1154 	/*
1155  	** Check if the PCI device can DMA to ptr... if so, just return ptr
1156  	*/
1157 	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1158 		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1159 		           dev->coherent_dma_mask, *dma_handle);
1160 
1161 		return addr;
1162 	}
1163 #endif
1164 
1165 	/*
1166 	 * If the device can't bypass or bypass is disabled, map through the
1167 	 * 32bit SAC-only pseudo device to get an iova mapping.
1168 	 */
1169 	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
1170 					   size, 0, NULL);
1171 
1172 	return addr;
1173 }
1174 
1175 
1176 /**
1177  * sba_free_coherent - free/unmap shared mem for DMA
1178  * @dev: instance of PCI owned by the driver that's asking.
1179  * @size:  number of bytes mapped in driver buffer.
1180  * @vaddr:  virtual address IOVA of "consistent" buffer.
1181  * @dma_handle:  IO virtual address of "consistent" buffer.
1182  *
1183  * See Documentation/PCI/PCI-DMA-mapping.txt
1184  */
1185 static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
1186 			       dma_addr_t dma_handle)
1187 {
1188 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
1189 	free_pages((unsigned long) vaddr, get_order(size));
1190 }
1191 
1192 
1193 /*
1194 ** Since 0 is a valid pdir_base index value, can't use that
1195 ** to determine if a value is valid or not. Use a flag to indicate
1196 ** the SG list entry contains a valid pdir index.
1197 */
1198 #define PIDE_FLAG 0x1UL
1199 
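/*
** Illustrative helpers only (not compiled): how sba_coalesce_chunks()
** stashes a pdir index in sg->dma_address and how sba_fill_pdir()
** recovers it.  The low bit is the flag; the rest is the pdir index
** shifted by iovp_shift plus the offset into the first page.
*/
#if 0
static dma_addr_t pide_encode(unsigned long pide, unsigned long offset)
{
	return PIDE_FLAG | (pide << iovp_shift) | offset;
}

static unsigned long pide_decode(dma_addr_t dma_address)
{
	return (dma_address & ~PIDE_FLAG) >> iovp_shift;	/* pdir index */
}
#endif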
1200 #ifdef DEBUG_LARGE_SG_ENTRIES
1201 int dump_run_sg = 0;
1202 #endif
1203 
1204 
1205 /**
1206  * sba_fill_pdir - write allocated SG entries into IO PDIR
1207  * @ioc: IO MMU structure which owns the pdir we are interested in.
1208  * @startsg:  list of IOVA/size pairs
1209  * @nents: number of entries in startsg list
1210  *
1211  * Take preprocessed SG list and write corresponding entries
1212  * in the IO PDIR.
1213  */
1214 
1215 static SBA_INLINE int
1216 sba_fill_pdir(
1217 	struct ioc *ioc,
1218 	struct scatterlist *startsg,
1219 	int nents)
1220 {
1221 	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
1222 	int n_mappings = 0;
1223 	u64 *pdirp = NULL;
1224 	unsigned long dma_offset = 0;
1225 
1226 	while (nents-- > 0) {
1227 		int     cnt = startsg->dma_length;
1228 		startsg->dma_length = 0;
1229 
1230 #ifdef DEBUG_LARGE_SG_ENTRIES
1231 		if (dump_run_sg)
1232 			printk(" %2d : %08lx/%05x %p\n",
1233 				nents, startsg->dma_address, cnt,
1234 				sba_sg_address(startsg));
1235 #else
1236 		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1237 				nents, startsg->dma_address, cnt,
1238 				sba_sg_address(startsg));
1239 #endif
1240 		/*
1241 		** Look for the start of a new DMA stream
1242 		*/
1243 		if (startsg->dma_address & PIDE_FLAG) {
1244 			u32 pide = startsg->dma_address & ~PIDE_FLAG;
1245 			dma_offset = (unsigned long) pide & ~iovp_mask;
1246 			startsg->dma_address = 0;
1247 			if (n_mappings)
1248 				dma_sg = sg_next(dma_sg);
1249 			dma_sg->dma_address = pide | ioc->ibase;
1250 			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1251 			n_mappings++;
1252 		}
1253 
1254 		/*
1255 		** Look for a VCONTIG chunk
1256 		*/
1257 		if (cnt) {
1258 			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1259 			ASSERT(pdirp);
1260 
1261 			/* Since multiple Vcontig blocks could make up
1262 			** one DMA stream, *add* cnt to dma_len.
1263 			*/
1264 			dma_sg->dma_length += cnt;
1265 			cnt += dma_offset;
1266 			dma_offset=0;	/* only want offset on first chunk */
1267 			cnt = ROUNDUP(cnt, iovp_size);
1268 			do {
1269 				sba_io_pdir_entry(pdirp, vaddr);
1270 				vaddr += iovp_size;
1271 				cnt -= iovp_size;
1272 				pdirp++;
1273 			} while (cnt > 0);
1274 		}
1275 		startsg = sg_next(startsg);
1276 	}
1277 	/* force pdir update */
1278 	wmb();
1279 
1280 #ifdef DEBUG_LARGE_SG_ENTRIES
1281 	dump_run_sg = 0;
1282 #endif
1283 	return(n_mappings);
1284 }
1285 
1286 
1287 /*
1288 ** Two address ranges are DMA contiguous *iff* "end of prev" and
1289 ** "start of next" are both on an IOV page boundary.
1290 **
1291 ** (shift left is a quick trick to mask off upper bits)
1292 */
1293 #define DMA_CONTIG(__X, __Y) \
1294 	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
1295 
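/*
** Worked example (illustrative, not compiled; 4k iovp_size): DMA_CONTIG()
** is true only when both addresses are on an IOV page boundary, since the
** left shift discards everything except the low iovp_shift bits.
*/
#if 0
static void dma_contig_example(void)
{
	/* prev chunk ends and next starts on 4k boundaries: fuse streams */
	BUG_ON(!DMA_CONTIG(0x6000UL, 0x7000UL));
	/* prev chunk ends mid-page: a new DMA stream must be started */
	BUG_ON(DMA_CONTIG(0x6100UL, 0x7000UL));
}
#endif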
1296 
1297 /**
1298  * sba_coalesce_chunks - preprocess the SG list
1299  * @ioc: IO MMU structure which owns the pdir we are interested in.
1300  * @startsg:  list of IOVA/size pairs
1301  * @nents: number of entries in startsg list
1302  *
1303  * First pass is to walk the SG list and determine where the breaks are
1304  * in the DMA stream. Allocates PDIR entries but does not fill them.
1305  * Returns the number of DMA chunks.
1306  *
1307  * Doing the fill separate from the coalescing/allocation keeps the
1308  * code simpler. Future enhancement could make one pass through
1309  * the sglist do both.
1310  */
1311 static SBA_INLINE int
1312 sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1313 	struct scatterlist *startsg,
1314 	int nents)
1315 {
1316 	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
1317 	unsigned long vcontig_len;         /* len of VCONTIG chunk */
1318 	unsigned long vcontig_end;
1319 	struct scatterlist *dma_sg;        /* next DMA stream head */
1320 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1321 	int n_mappings = 0;
1322 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
1323 
1324 	while (nents > 0) {
1325 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1326 
1327 		/*
1328 		** Prepare for first/next DMA stream
1329 		*/
1330 		dma_sg = vcontig_sg = startsg;
1331 		dma_len = vcontig_len = vcontig_end = startsg->length;
1332 		vcontig_end +=  vaddr;
1333 		dma_offset = vaddr & ~iovp_mask;
1334 
1335 		/* PARANOID: clear entries */
1336 		startsg->dma_address = startsg->dma_length = 0;
1337 
1338 		/*
1339 		** This loop terminates one iteration "early" since
1340 		** it's always looking one "ahead".
1341 		*/
1342 		while (--nents > 0) {
1343 			unsigned long vaddr;	/* tmp */
1344 
1345 			startsg = sg_next(startsg);
1346 
1347 			/* PARANOID */
1348 			startsg->dma_address = startsg->dma_length = 0;
1349 
1350 			/* catch brokenness in SCSI layer */
1351 			ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1352 
1353 			/*
1354 			** First make sure current dma stream won't
1355 			** exceed DMA_CHUNK_SIZE if we coalesce the
1356 			** next entry.
1357 			*/
1358 			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1359 			    > DMA_CHUNK_SIZE)
1360 				break;
1361 
1362 			if (dma_len + startsg->length > max_seg_size)
1363 				break;
1364 
1365 			/*
1366 			** Then look for virtually contiguous blocks.
1367 			**
1368 			** append the next transaction?
1369 			*/
1370 			vaddr = (unsigned long) sba_sg_address(startsg);
1371 			if  (vcontig_end == vaddr)
1372 			{
1373 				vcontig_len += startsg->length;
1374 				vcontig_end += startsg->length;
1375 				dma_len     += startsg->length;
1376 				continue;
1377 			}
1378 
1379 #ifdef DEBUG_LARGE_SG_ENTRIES
1380 			dump_run_sg = (vcontig_len > iovp_size);
1381 #endif
1382 
1383 			/*
1384 			** Not virtually contiguous.
1385 			** Terminate prev chunk.
1386 			** Start a new chunk.
1387 			**
1388 			** Once we start a new VCONTIG chunk, dma_offset
1389 			** can't change. And we need the offset from the first
1390 			** chunk - not the last one. Ergo Successive chunks
1391 			** chunk - not the last one. Ergo, successive chunks
1392 			** must start on page boundaries and dovetail
1393 			** with their predecessors.
1394 			vcontig_sg->dma_length = vcontig_len;
1395 
1396 			vcontig_sg = startsg;
1397 			vcontig_len = startsg->length;
1398 
1399 			/*
1400 			** 3) do the entries end/start on page boundaries?
1401 			**    Don't update vcontig_end until we've checked.
1402 			*/
1403 			if (DMA_CONTIG(vcontig_end, vaddr))
1404 			{
1405 				vcontig_end = vcontig_len + vaddr;
1406 				dma_len += vcontig_len;
1407 				continue;
1408 			} else {
1409 				break;
1410 			}
1411 		}
1412 
1413 		/*
1414 		** End of DMA Stream
1415 		** Terminate last VCONTIG block.
1416 		** Allocate space for DMA stream.
1417 		*/
1418 		vcontig_sg->dma_length = vcontig_len;
1419 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1420 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
1421 		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
1422 			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
1423 			| dma_offset);
1424 		n_mappings++;
1425 	}
1426 
1427 	return n_mappings;
1428 }
1429 
1430 
1431 /**
1432  * sba_map_sg - map Scatter/Gather list
1433  * @dev: instance of PCI owned by the driver that's asking.
1434  * @sglist:  array of buffer/length pairs
1435  * @nents:  number of entries in list
1436  * @dir:  R/W or both.
1437  * @attrs: optional dma attributes
1438  *
1439  * See Documentation/PCI/PCI-DMA-mapping.txt
1440  */
1441 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1442 			    int nents, enum dma_data_direction dir,
1443 			    struct dma_attrs *attrs)
1444 {
1445 	struct ioc *ioc;
1446 	int coalesced, filled = 0;
1447 #ifdef ASSERT_PDIR_SANITY
1448 	unsigned long flags;
1449 #endif
1450 #ifdef ALLOW_IOV_BYPASS_SG
1451 	struct scatterlist *sg;
1452 #endif
1453 
1454 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1455 	ioc = GET_IOC(dev);
1456 	ASSERT(ioc);
1457 
1458 #ifdef ALLOW_IOV_BYPASS_SG
1459 	ASSERT(to_pci_dev(dev)->dma_mask);
1460 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1461 		for_each_sg(sglist, sg, nents, filled) {
1462 			sg->dma_length = sg->length;
1463 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
1464 		}
1465 		return filled;
1466 	}
1467 #endif
1468 	/* Fast path single entry scatterlists. */
1469 	if (nents == 1) {
1470 		sglist->dma_length = sglist->length;
1471 		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
1472 		return 1;
1473 	}
1474 
1475 #ifdef ASSERT_PDIR_SANITY
1476 	spin_lock_irqsave(&ioc->res_lock, flags);
1477 	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1478 	{
1479 		sba_dump_sg(ioc, sglist, nents);
1480 		panic("Check before sba_map_sg_attrs()");
1481 	}
1482 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1483 #endif
1484 
1485 	prefetch(ioc->res_hint);
1486 
1487 	/*
1488 	** First coalesce the chunks and allocate I/O pdir space
1489 	**
1490 	** If this is one DMA stream, we can properly map using the
1491 	** correct virtual address associated with each DMA page.
1492 	** w/o this association, we wouldn't have coherent DMA!
1493 	** Access to the virtual address is what forces a two pass algorithm.
1494 	*/
1495 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1496 
1497 	/*
1498 	** Program the I/O Pdir
1499 	**
1500 	** map the virtual addresses to the I/O Pdir
1501 	** o dma_address will contain the pdir index
1502 	** o dma_len will contain the number of bytes to map
1503 	** o address contains the virtual address.
1504 	*/
1505 	filled = sba_fill_pdir(ioc, sglist, nents);
1506 
1507 #ifdef ASSERT_PDIR_SANITY
1508 	spin_lock_irqsave(&ioc->res_lock, flags);
1509 	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1510 	{
1511 		sba_dump_sg(ioc, sglist, nents);
1512 		panic("Check after sba_map_sg_attrs()\n");
1513 	}
1514 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1515 #endif
1516 
1517 	ASSERT(coalesced == filled);
1518 	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1519 
1520 	return filled;
1521 }
1522 
1523 /**
1524  * sba_unmap_sg_attrs - unmap Scatter/Gather list
1525  * @dev: instance of PCI owned by the driver that's asking.
1526  * @sglist:  array of buffer/length pairs
1527  * @nents:  number of entries in list
1528  * @dir:  R/W or both.
1529  * @attrs: optional dma attributes
1530  *
1531  * See Documentation/PCI/PCI-DMA-mapping.txt
1532  */
1533 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1534 			       int nents, enum dma_data_direction dir,
1535 			       struct dma_attrs *attrs)
1536 {
1537 #ifdef ASSERT_PDIR_SANITY
1538 	struct ioc *ioc;
1539 	unsigned long flags;
1540 #endif
1541 
1542 	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
1543 		   __func__, nents, sba_sg_address(sglist), sglist->length);
1544 
1545 #ifdef ASSERT_PDIR_SANITY
1546 	ioc = GET_IOC(dev);
1547 	ASSERT(ioc);
1548 
1549 	spin_lock_irqsave(&ioc->res_lock, flags);
1550 	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1551 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1552 #endif
1553 
1554 	while (nents && sglist->dma_length) {
1555 
1556 		sba_unmap_single_attrs(dev, sglist->dma_address,
1557 				       sglist->dma_length, dir, attrs);
1558 		sglist = sg_next(sglist);
1559 		nents--;
1560 	}
1561 
1562 	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);
1563 
1564 #ifdef ASSERT_PDIR_SANITY
1565 	spin_lock_irqsave(&ioc->res_lock, flags);
1566 	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1567 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1568 #endif
1569 
1570 }
1571 
1572 /**************************************************************
1573 *
1574 *   Initialization and claim
1575 *
1576 ***************************************************************/
1577 
1578 static void __init
1579 ioc_iova_init(struct ioc *ioc)
1580 {
1581 	int tcnfg;
1582 	int agp_found = 0;
1583 	struct pci_dev *device = NULL;
1584 #ifdef FULL_VALID_PDIR
1585 	unsigned long index;
1586 #endif
1587 
1588 	/*
1589 	** Firmware programs the base and size of a "safe IOVA space"
1590 	** (one that doesn't overlap memory or LMMIO space) in the
1591 	** IBASE and IMASK registers.
1592 	*/
1593 	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1594 	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1595 
1596 	ioc->iov_size = ~ioc->imask + 1;
1597 
1598 	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1599 		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1600 		ioc->iov_size >> 20);
1601 
1602 	switch (iovp_size) {
1603 		case  4*1024: tcnfg = 0; break;
1604 		case  8*1024: tcnfg = 1; break;
1605 		case 16*1024: tcnfg = 2; break;
1606 		case 64*1024: tcnfg = 3; break;
1607 		default:
1608 			panic(PFX "Unsupported IOTLB page size %ldK",
1609 				iovp_size >> 10);
1610 			break;
1611 	}
1612 	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1613 
1614 	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1615 	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1616 						   get_order(ioc->pdir_size));
1617 	if (!ioc->pdir_base)
1618 		panic(PFX "Couldn't allocate I/O Page Table\n");
1619 
1620 	memset(ioc->pdir_base, 0, ioc->pdir_size);
1621 
1622 	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1623 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1624 
1625 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1626 	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1627 
1628 	/*
1629 	** If an AGP device is present, only use half of the IOV space
1630 	** for PCI DMA.  Unfortunately we can't know ahead of time
1631 	** whether GART support will actually be used, for now we
1632 	** can just key on an AGP device found in the system.
1633 	** We program the next pdir index after we stop w/ a key for
1634 	** the GART code to handshake on.
1635 	*/
1636 	for_each_pci_dev(device)
1637 		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1638 
1639 	if (agp_found && reserve_sba_gart) {
1640 		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1641 		      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1642 		ioc->pdir_size /= 2;
1643 		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1644 	}
1645 #ifdef FULL_VALID_PDIR
1646 	/*
1647 	** Check to see if the spill page has been allocated; we don't need more than
1648 	** one across multiple SBAs.
1649 	*/
1650 	if (!prefetch_spill_page) {
1651 		char *spill_poison = "SBAIOMMU POISON";
1652 		int poison_size = 16;
1653 		void *poison_addr, *addr;
1654 
1655 		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1656 		if (!addr)
1657 			panic(PFX "Couldn't allocate PDIR spill page\n");
1658 
1659 		poison_addr = addr;
1660 		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
1661 			memcpy(poison_addr, spill_poison, poison_size);
1662 
1663 		prefetch_spill_page = virt_to_phys(addr);
1664 
1665 		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1666 	}
1667 	/*
1668   	** Set all the PDIR entries valid w/ the spill page as the target
1669 	*/
1670 	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1671 		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1672 #endif
1673 
1674 	/* Clear I/O TLB of any possible entries */
1675 	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1676 	READ_REG(ioc->ioc_hpa + IOC_PCOM);
1677 
1678 	/* Enable IOVA translation */
1679 	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1680 	READ_REG(ioc->ioc_hpa + IOC_IBASE);
1681 }
1682 
1683 static void __init
1684 ioc_resource_init(struct ioc *ioc)
1685 {
1686 	spin_lock_init(&ioc->res_lock);
1687 #if DELAYED_RESOURCE_CNT > 0
1688 	spin_lock_init(&ioc->saved_lock);
1689 #endif
1690 
1691 	/* resource map size dictated by pdir_size */
1692 	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1693 	ioc->res_size >>= 3;  /* convert bit count to byte count */
1694 	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1695 
1696 	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1697 						 get_order(ioc->res_size));
1698 	if (!ioc->res_map)
1699 		panic(PFX "Couldn't allocate resource map\n");
1700 
1701 	memset(ioc->res_map, 0, ioc->res_size);
1702 	/* next available IOVP - circular search */
1703 	ioc->res_hint = (unsigned long *) ioc->res_map;
1704 
1705 #ifdef ASSERT_PDIR_SANITY
1706 	/* Mark first bit busy - ie no IOVA 0 */
1707 	ioc->res_map[0] = 0x1;
1708 	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1709 #endif
1710 #ifdef FULL_VALID_PDIR
1711 	/* Mark the last resource used so we don't prefetch beyond IOVA space */
1712 	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1713 	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1714 							      | prefetch_spill_page);
1715 #endif
1716 
1717 	DBG_INIT("%s() res_map %x %p\n", __func__,
1718 		 ioc->res_size, (void *) ioc->res_map);
1719 }
1720 
1721 static void __init
1722 ioc_sac_init(struct ioc *ioc)
1723 {
1724 	struct pci_dev *sac = NULL;
1725 	struct pci_controller *controller = NULL;
1726 
1727 	/*
1728 	 * pci_alloc_coherent() must return a DMA address which is
1729 	 * SAC (single address cycle) addressable, so allocate a
1730 	 * pseudo-device to enforce that.
1731 	 */
1732 	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1733 	if (!sac)
1734 		panic(PFX "Couldn't allocate struct pci_dev");
1735 
1736 	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1737 	if (!controller)
1738 		panic(PFX "Couldn't allocate struct pci_controller");
1739 
1740 	controller->iommu = ioc;
1741 	sac->sysdata = controller;
1742 	sac->dma_mask = 0xFFFFFFFFUL;
1743 #ifdef CONFIG_PCI
1744 	sac->dev.bus = &pci_bus_type;
1745 #endif
1746 	ioc->sac_only_dev = sac;
1747 }
1748 
1749 static void __init
1750 ioc_zx1_init(struct ioc *ioc)
1751 {
1752 	unsigned long rope_config;
1753 	unsigned int i;
1754 
1755 	if (ioc->rev < 0x20)
1756 		panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1757 
1758 	/* 38 bit memory controller + extra bit for range displaced by MMIO */
1759 	ioc->dma_mask = (0x1UL << 39) - 1;
1760 
1761 	/*
1762 	** Clear ROPE(N)_CONFIG AO bit.
1763 	** Disables "NT Ordering" (~= !"Relaxed Ordering")
1764 	** Overrides bit 1 in DMA Hint Sets.
1765 	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1766 	*/
1767 	for (i=0; i<(8*8); i+=8) {
1768 		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1769 		rope_config &= ~IOC_ROPE_AO;
1770 		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1771 	}
1772 }
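
/*
** Notes on ioc_zx1_init(): the dma_mask of (1UL << 39) - 1 allows direct
** addressing of just under 512GB, matching the "38 bit + extra bit"
** comment above.  The loop walks what appear to be eight ROPE(N)_CONFIG
** registers spaced 8 bytes apart starting at IOC_ROPE0_CFG
** (i = 0, 8, ..., 56).
*/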
1773 
1774 typedef void (initfunc)(struct ioc *);
1775 
1776 struct ioc_iommu {
1777 	u32 func_id;
1778 	char *name;
1779 	initfunc *init;
1780 };
1781 
1782 static struct ioc_iommu ioc_iommu_info[] __initdata = {
1783 	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
1784 	{ ZX2_IOC_ID, "zx2", NULL },
1785 	{ SX1000_IOC_ID, "sx1000", NULL },
1786 	{ SX2000_IOC_ID, "sx2000", NULL },
1787 };
1788 
1789 static struct ioc * __init
1790 ioc_init(u64 hpa, void *handle)
1791 {
1792 	struct ioc *ioc;
1793 	struct ioc_iommu *info;
1794 
1795 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1796 	if (!ioc)
1797 		return NULL;
1798 
1799 	ioc->next = ioc_list;
1800 	ioc_list = ioc;
1801 
1802 	ioc->handle = handle;
1803 	ioc->ioc_hpa = ioremap(hpa, 0x1000);
1804 
1805 	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1806 	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1807 	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */
1808 
1809 	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1810 		if (ioc->func_id == info->func_id) {
1811 			ioc->name = info->name;
1812 			if (info->init)
1813 				(info->init)(ioc);
1814 		}
1815 	}
1816 
1817 	iovp_size = (1 << iovp_shift);
1818 	iovp_mask = ~(iovp_size - 1);
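	/*
	** E.g. with the default iovp_shift of 12 (set in acpi_sba_ioc_add()
	** below when nothing else applies): iovp_size = 4096 and
	** iovp_mask = ~0xFFFUL, i.e. the mask strips the in-page offset
	** from an IOVA.
	*/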
1819 
1820 	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1821 		PAGE_SIZE >> 10, iovp_size >> 10);
1822 
1823 	if (!ioc->name) {
1824 		ioc->name = kmalloc(24, GFP_KERNEL);
1825 		if (ioc->name)
1826 			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1827 				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1828 		else
1829 			ioc->name = "Unknown";
1830 	}
1831 
1832 	ioc_iova_init(ioc);
1833 	ioc_resource_init(ioc);
1834 	ioc_sac_init(ioc);
1835 
1836 	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
1837 		ia64_max_iommu_merge_mask = ~iovp_mask;
1838 
1839 	printk(KERN_INFO PFX
1840 		"%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n",
1841 		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1842 		hpa, ioc->iov_size >> 20, ioc->ibase);
1843 
1844 	return ioc;
1845 }
1846 
1847 
1848 
1849 /**************************************************************************
1850 **
1851 **   SBA initialization code (HW and SW)
1852 **
1853 **   o identify SBA chip itself
1854 **   o FIXME: initialize DMA hints for reasonable defaults
1855 **
1856 **************************************************************************/
1857 
1858 #ifdef CONFIG_PROC_FS
1859 static void *
1860 ioc_start(struct seq_file *s, loff_t *pos)
1861 {
1862 	struct ioc *ioc;
1863 	loff_t n = *pos;
1864 
1865 	for (ioc = ioc_list; ioc; ioc = ioc->next)
1866 		if (!n--)
1867 			return ioc;
1868 
1869 	return NULL;
1870 }
1871 
1872 static void *
1873 ioc_next(struct seq_file *s, void *v, loff_t *pos)
1874 {
1875 	struct ioc *ioc = v;
1876 
1877 	++*pos;
1878 	return ioc->next;
1879 }
1880 
1881 static void
1882 ioc_stop(struct seq_file *s, void *v)
1883 {
1884 }
1885 
1886 static int
1887 ioc_show(struct seq_file *s, void *v)
1888 {
1889 	struct ioc *ioc = v;
1890 	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1891 	int i, used = 0;
1892 
1893 	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1894 		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1895 #ifdef CONFIG_NUMA
1896 	if (ioc->node != MAX_NUMNODES)
1897 		seq_printf(s, "NUMA node       : %d\n", ioc->node);
1898 #endif
1899 	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1900 	seq_printf(s, "IOVA page size  : %ld KB\n", iovp_size/1024);
1901 
1902 	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1903 		used += hweight64(*res_ptr);
1904 
1905 	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
1906 	seq_printf(s, "PDIR used       : %d entries\n", used);
1907 
1908 #ifdef PDIR_SEARCH_TIMING
1909 	{
1910 		unsigned long i = 0, avg = 0, min, max;
1911 		min = max = ioc->avg_search[0];
1912 		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1913 			avg += ioc->avg_search[i];
1914 			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1915 			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1916 		}
1917 		avg /= SBA_SEARCH_SAMPLE;
1918 		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1919 		           min, avg, max);
1920 	}
1921 #endif
1922 #ifndef ALLOW_IOV_BYPASS
1923 	seq_printf(s, "IOVA bypass disabled\n");
1924 #endif
1925 	return 0;
1926 }
1927 
1928 static const struct seq_operations ioc_seq_ops = {
1929 	.start = ioc_start,
1930 	.next  = ioc_next,
1931 	.stop  = ioc_stop,
1932 	.show  = ioc_show
1933 };
1934 
1935 static int
1936 ioc_open(struct inode *inode, struct file *file)
1937 {
1938 	return seq_open(file, &ioc_seq_ops);
1939 }
1940 
1941 static const struct file_operations ioc_fops = {
1942 	.open    = ioc_open,
1943 	.read    = seq_read,
1944 	.llseek  = seq_lseek,
1945 	.release = seq_release
1946 };
1947 
1948 static void __init
1949 ioc_proc_init(void)
1950 {
1951 	struct proc_dir_entry *dir;
1952 
1953 	dir = proc_mkdir("bus/mckinley", NULL);
1954 	if (!dir)
1955 		return;
1956 
1957 	proc_create(ioc_list->name, 0, dir, &ioc_fops);
1958 }
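
/*
** The statistics printed by ioc_show() appear under
** /proc/bus/mckinley/<ioc name>, e.g. /proc/bus/mckinley/zx1 on a zx1
** system (illustrative path, derived from the proc_mkdir()/proc_create()
** calls above).  Only the first IOC on ioc_list gets an entry.
*/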
1959 #endif
1960 
1961 static void
1962 sba_connect_bus(struct pci_bus *bus)
1963 {
1964 	acpi_handle handle, parent;
1965 	acpi_status status;
1966 	struct ioc *ioc;
1967 
1968 	if (!PCI_CONTROLLER(bus))
1969 		panic(PFX "no sysdata on bus %d!\n", bus->number);
1970 
1971 	if (PCI_CONTROLLER(bus)->iommu)
1972 		return;
1973 
1974 	handle = PCI_CONTROLLER(bus)->acpi_handle;
1975 	if (!handle)
1976 		return;
1977 
1978 	/*
1979 	 * The IOC scope encloses PCI root bridges in the ACPI
1980 	 * namespace, so work our way out until we find an IOC we
1981 	 * claimed previously.
1982 	 */
1983 	do {
1984 		for (ioc = ioc_list; ioc; ioc = ioc->next)
1985 			if (ioc->handle == handle) {
1986 				PCI_CONTROLLER(bus)->iommu = ioc;
1987 				return;
1988 			}
1989 
1990 		status = acpi_get_parent(handle, &parent);
1991 		handle = parent;
1992 	} while (ACPI_SUCCESS(status));
1993 
1994 	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
1995 }
1996 
1997 #ifdef CONFIG_NUMA
1998 static void __init
1999 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2000 {
2001 	unsigned int node;
2002 	int pxm;
2003 
2004 	ioc->node = MAX_NUMNODES;
2005 
2006 	pxm = acpi_get_pxm(handle);
2007 
2008 	if (pxm < 0)
2009 		return;
2010 
2011 	node = pxm_to_node(pxm);
2012 
2013 	if (node >= MAX_NUMNODES || !node_online(node))
2014 		return;
2015 
2016 	ioc->node = node;
2017 	return;
2018 }
2019 #else
2020 #define sba_map_ioc_to_node(ioc, handle)
2021 #endif
2022 
2023 static int __init
2024 acpi_sba_ioc_add(struct acpi_device *device)
2025 {
2026 	struct ioc *ioc;
2027 	acpi_status status;
2028 	u64 hpa, length;
2029 	struct acpi_buffer buffer;
2030 	struct acpi_device_info *dev_info;
2031 
2032 	status = hp_acpi_csr_space(device->handle, &hpa, &length);
2033 	if (ACPI_FAILURE(status))
2034 		return 1;
2035 
2036 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
2037 	status = acpi_get_object_info(device->handle, &buffer);
2038 	if (ACPI_FAILURE(status))
2039 		return 1;
2040 	dev_info = buffer.pointer;
2041 
2042 	/*
2043 	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
2044 	 * root bridges, and its CSR space includes the IOC function.
2045 	 */
2046 	if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
2047 		hpa += ZX1_IOC_OFFSET;
2048 		/* zx1 based systems default to kernel page size iommu pages */
2049 		if (!iovp_shift)
2050 			iovp_shift = min(PAGE_SHIFT, 16);
2051 	}
2052 	kfree(dev_info);
2053 
2054 	/*
2055 	 * default anything not caught above or specified on cmdline to 4k
2056 	 * iommu page size
2057 	 */
2058 	if (!iovp_shift)
2059 		iovp_shift = 12;
2060 
2061 	ioc = ioc_init(hpa, device->handle);
2062 	if (!ioc)
2063 		return 1;
2064 
2065 	/* setup NUMA node association */
2066 	sba_map_ioc_to_node(ioc, device->handle);
2067 	return 0;
2068 }
2069 
2070 static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2071 	{"HWP0001", 0},
2072 	{"HWP0004", 0},
2073 	{"", 0},
2074 };
2075 static struct acpi_driver acpi_sba_ioc_driver = {
2076 	.name		= "IOC IOMMU Driver",
2077 	.ids		= hp_ioc_iommu_device_ids,
2078 	.ops		= {
2079 		.add	= acpi_sba_ioc_add,
2080 	},
2081 };
2082 
2083 extern struct dma_map_ops swiotlb_dma_ops;
2084 
2085 static int __init
2086 sba_init(void)
2087 {
2088 	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2089 		return 0;
2090 
2091 #if defined(CONFIG_IA64_GENERIC)
2092 	/* If we are booting a kdump kernel, the sba_iommu will
2093 	 * cause devices that were not shut down properly to MCA
2094 	 * as soon as they are turned back on.  Our only option for
2095 	 * a successful kdump kernel boot is to use the swiotlb.
2096 	 */
2097 	if (is_kdump_kernel()) {
2098 		dma_ops = &swiotlb_dma_ops;
2099 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2100 			panic("Unable to initialize software I/O TLB:"
2101 				  " Try machvec=dig boot option");
2102 		machvec_init("dig");
2103 		return 0;
2104 	}
2105 #endif
2106 
2107 	acpi_bus_register_driver(&acpi_sba_ioc_driver);
2108 	if (!ioc_list) {
2109 #ifdef CONFIG_IA64_GENERIC
2110 		/*
2111 		 * If we didn't find something sba_iommu can claim, we
2112 		 * need to setup the swiotlb and switch to the dig machvec.
2113 		 */
2114 		dma_ops = &swiotlb_dma_ops;
2115 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2116 			panic("Unable to find SBA IOMMU or initialize "
2117 			      "software I/O TLB: Try machvec=dig boot option");
2118 		machvec_init("dig");
2119 #else
2120 		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2121 #endif
2122 		return 0;
2123 	}
2124 
2125 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2126 	/*
2127 	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2128 	 * buffer set up to support devices with smaller DMA masks than
2129 	 * sba_iommu can handle.
2130 	 */
2131 	if (ia64_platform_is("hpzx1_swiotlb")) {
2132 		extern void hwsw_init(void);
2133 
2134 		hwsw_init();
2135 	}
2136 #endif
2137 
2138 #ifdef CONFIG_PCI
2139 	{
2140 		struct pci_bus *b = NULL;
2141 		while ((b = pci_find_next_bus(b)) != NULL)
2142 			sba_connect_bus(b);
2143 	}
2144 #endif
2145 
2146 #ifdef CONFIG_PROC_FS
2147 	ioc_proc_init();
2148 #endif
2149 	return 0;
2150 }
2151 
2152 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2153 
2154 static int __init
2155 nosbagart(char *str)
2156 {
2157 	reserve_sba_gart = 0;
2158 	return 1;
2159 }
2160 
2161 static int sba_dma_supported (struct device *dev, u64 mask)
2162 {
2163 	/* make sure it's at least 32bit capable */
2164 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2165 }
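
/*
** Example of the check in sba_dma_supported(): a 32-bit mask such as
** DMA_BIT_MASK(32) (0xFFFFFFFF) is accepted, while a 24-bit ISA-style
** mask (0x00FFFFFF) is rejected, since all of the low 32 bits must be
** set.
*/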
2166 
2167 static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2168 {
2169 	return 0;
2170 }
2171 
2172 __setup("nosbagart", nosbagart);
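
/*
** Boot-option note: "nosbagart" merely clears reserve_sba_gart; the flag
** is consumed during IOVA initialization earlier in this file, where it
** otherwise reserves part of the IOVA space for an AGP GART.
*/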
2173 
2174 static int __init
2175 sba_page_override(char *str)
2176 {
2177 	unsigned long page_size;
2178 
2179 	page_size = memparse(str, &str);
2180 	switch (page_size) {
2181 		case 4096:
2182 		case 8192:
2183 		case 16384:
2184 		case 65536:
2185 			iovp_shift = ffs(page_size) - 1;
2186 			break;
2187 		default:
2188 			printk(KERN_ERR "%s: unknown/unsupported iommu page size %ld\n",
2189 			       __func__, page_size);
2190 	}
2191 
2192 	return 1;
2193 }
2194 
2195 __setup("sbapagesize=", sba_page_override);
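
/*
** Usage example (hypothetical command line): "sbapagesize=64k" makes
** memparse() return 65536, so iovp_shift becomes ffs(65536) - 1 = 16 and
** the IOMMU uses 64KB pages.  Sizes other than 4k/8k/16k/64k are
** rejected with the message above and iovp_shift is left unchanged.
*/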
2196 
2197 struct dma_map_ops sba_dma_ops = {
2198 	.alloc_coherent		= sba_alloc_coherent,
2199 	.free_coherent		= sba_free_coherent,
2200 	.map_page		= sba_map_page,
2201 	.unmap_page		= sba_unmap_page,
2202 	.map_sg			= sba_map_sg_attrs,
2203 	.unmap_sg		= sba_unmap_sg_attrs,
2204 	.sync_single_for_cpu	= machvec_dma_sync_single,
2205 	.sync_sg_for_cpu	= machvec_dma_sync_sg,
2206 	.sync_single_for_device	= machvec_dma_sync_single,
2207 	.sync_sg_for_device	= machvec_dma_sync_sg,
2208 	.dma_supported		= sba_dma_supported,
2209 	.mapping_error		= sba_dma_mapping_error,
2210 };
2211 
2212 void sba_dma_init(void)
2213 {
2214 	dma_ops = &sba_dma_ops;
2215 }
2216