// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff

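/* Build a scatter-gather page-table entry for the IOMMU: the physical
   page frame number shifted left by one, with bit 0 set as the valid
   bit.  E.g. with the Alpha's 8 KB pages, a page-aligned paddr of
   0x1234000 becomes pfn 0x91a and pte 0x1235.  */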
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}

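/* Create a new IOMMU arena for HOSE: a DMA window of WINDOW_SIZE bytes
   starting at bus address BASE, backed by a flat table of one PTE per
   window page.  The table therefore needs
   window_size / (PAGE_SIZE / sizeof(unsigned long)) bytes; e.g. a 1 GB
   window with 8 KB pages takes 1 MB of PTEs.  */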
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
		       long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;
	int pass = 0;
	unsigned long base;
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);
	i = 0;

again:
	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);
			goto again;
		}

		if (ptes[p+i])
			p = ALIGN(p + i + 1, mask + 1), i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		if (pass < 1) {
			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

			pass++;
			p = 0;
			i = 0;
			goto again;
		} else
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}

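/* Allocate a run of N consecutive ptes from ARENA, aligned as requested,
   marking them invalid-but-in-use so a concurrent TLB reload cannot pick
   them up.  Returns the starting page index within the arena, or -1 if
   no room is left even after a flush-and-rescan.  */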
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

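/* Return N ptes starting at OFS to the free pool by zeroing them.
   The IOMMU TLB is not flushed here; callers flush when the freed
   range could still be cached (see the unmap paths below).  */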
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}

/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

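/* Three mapping strategies are tried in order: the direct-map window
   (no IOMMU involvement), DAC (64-bit dual address cycle) when the
   caller allowed it, and finally a scatter-gather arena entry
   programmed through the IOMMU page table.  */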
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}

/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}

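/* The dma_map_ops .map_page hook: convert the generic device back to a
   pci_dev (or NULL for a pure ISA bus master) and hand off to
   pci_map_single_1, allowing DAC only if the device's mask permits.  */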
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	int dac_allowed;

	BUG_ON(dir == DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	unsigned long flags;
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);
		return;
		BUG();
	}

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
				      unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	void *cpu_addr;
	long order = get_order(size);

	gfp &= ~GFP_DMA;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
			__builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
				    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
}

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

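/* For example, three entries that are physically contiguous classify as
   { 0, -1, -1 } with the leader's dma_length covering all three, while
   page-aligned but physically scattered entries classify as
   { 1, -2, -2 } and will be made contiguous through the IOMMU.  */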
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg entries without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (leader_length + len > max_seg_size)
			goto new_segment;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
new_segment:
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

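/* Returns 0 when the leader was satisfied by the direct map or DAC,
   1 when an IOMMU arena entry had to be programmed, and -1 when no
   mapping could be made.  */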
static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg = sg_next(sg);
		}

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}

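/* The dma_map_ops .map_sg hook: classify the list into mergeable runs,
   pick the direct map, DAC or an IOMMU arena for each leader via
   sg_fill, and return the number of DMA segments produced.  */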
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	BUG_ON(dir == DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return -EIO;
		return 1;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0) {
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
		return -ENOMEM;
	}
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		dma_unmap_sg(&pdev->dev, start, out - start, dir);
	return -ENOMEM;
}

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%llx,%zx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%llx,%zx]\n",
			      sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base, the direct map will do.  */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}


/*
 * AGP GART extensions to the IOMMU
 */
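/* The AGP GART code first reserves a block of ptes (iommu_reserve),
   later binds real pages into it (iommu_bind), and tears it down with
   iommu_unbind and iommu_release.  Reserved entries hold
   IOMMU_RESERVED_PTE so ordinary arena allocations skip them.  */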
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}

int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   struct page **pages)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}

int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}

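/* The dma_map_ops instance exported for Alpha PCI (and ISA/EISA via the
   isa_bridge fallback); coherent allocation and page/scatterlist mapping
   all route through the helpers above.  */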
const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);
934