xref: /openbmc/linux/arch/parisc/kernel/pci-dma.c (revision bbecb07f)
// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**      (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
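/*
** How this file hangs together (summary inferred from the code below):
** consistent ("coherent") allocations on PCXL machines grab ordinary
** pages, flush them from the data cache, and remap them uncached into a
** dedicated virtual window starting at pcxl_dma_start.  A bitmap
** (pcxl_res_map, one bit per page) tracks which slots of that window are
** in use.  PCX machines, which cannot create uncached mappings, fall
** back to pcx_dma_ops at the bottom of this file, which only supports
** non-consistent allocations.
*/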

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>	/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry *proc_gsc_root __read_mostly;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

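/*
** map_pte_uncached()/map_pmd_uncached()/map_uncached_pages() walk the
** kernel page tables and install PAGE_KERNEL_UNC (uncached) translations
** for the physical pages backing a consistent allocation.  Each PTE is
** written under purge_tlb_start()/purge_tlb_end(), and the old
** translation is flushed with pdtlb_kernel() so no cached alias of the
** buffer can survive in the TLB.
*/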
static inline int map_pte_uncached(pte_t *pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t *pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t *pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t *dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

static inline void unmap_uncached_pte(pmd_t *pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t *pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t *dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t *dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

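/*
** The resource map is a plain bitmap: one bit per page of the pcxl DMA
** window, eight bits per map byte.  The search macros scan the map in
** 8-, 16- or 32-bit chunks (picked by allocation size), looking for a
** chunk whose low pages_needed bits are all clear; the mask is never
** shifted, so every mapping starts on a chunk boundary.  Worked example
** (illustrative values, not from the source): pages_needed = 3 gives
** mask = 0x7; the first u8 chunk with (*res_ptr & 0x7) == 0 is claimed
** by *res_ptr |= 0x7, and res_idx becomes the byte offset of that chunk
** into pcxl_res_map.
*/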
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

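/*
** Reserve a run of pages in the pcxl DMA window and return its kernel
** virtual address.  size must be a non-zero, page-aligned byte count
** (pa11_dma_alloc() rounds up to a power-of-two number of pages first):
** pages_needed == 0 would make the mask computation below shift by
** BITS_PER_LONG, which is undefined.
*/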
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %zu pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;
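/*
** Note: for every size used here (8, 16, 32), the offset term
** (((size >> 3) - 1) & (~((size >> 3) - 1))) has the form x & ~x == 0,
** so the expression reduces to pcxl_res_map[idx]: the chunk that
** pcxl_alloc_range() marked is simply cleared again.
*/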

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %zu pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "            total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    seq_puts(m,"\n   ");
		seq_printf(m, " %08lx", *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	if (!pcxl_res_map)
		panic("pcxl_dma_init: unable to allocate DMA resource map\n");
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry *ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

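/*
** Consistent allocation for PCXL: carve a slot out of the uncached
** window, back it with ordinary pages, flush those pages from the data
** cache while they are still mapped cacheable, then remap them uncached
** at the window address.  Note that `paddr' holds the kernel virtual
** address returned by __get_free_pages() until the __pa() conversion
** below, and that an allocation failure from __get_free_pages() is not
** checked here.
*/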
static void *pa11_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || dev->coherent_dma_mask < 0xffffffff)
		flag |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

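/*
** Streaming mappings: the CPU caches are not coherent with DMA, so a
** buffer is flushed (written back and invalidated) from the data cache
** before the device touches it.  The DMA address handed back is simply
** the buffer's physical address; there is no IOMMU on these machines.
*/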
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	void *addr = page_address(page) + offset;

	BUG_ON(direction == DMA_NONE);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		flush_kernel_dcache_range((unsigned long) addr, size);

	return virt_to_phys(addr);
}

static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For DMA_FROM_DEVICE this flush is not necessary for the
	 * simple map/unmap case.  However, it IS necessary if
	 * dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		unsigned long vaddr = (unsigned long)sg_virt(sg);

		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sg) = sg->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		flush_kernel_dcache_range(vaddr, sg->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	flush_kernel_dcache_range((unsigned long)vaddr, size);
}

const struct dma_map_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc =		pa11_dma_alloc,
	.free =			pa11_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
	.cache_sync =		pa11_dma_cache_sync,
};

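/*
** Driver-side usage sketch (hypothetical code, not from this file): with
** pcxl_dma_ops installed, a consistent buffer comes from the generic API:
**
**	void *cpu_addr;
**	dma_addr_t dma;
**
**	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
**	...
**	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma);
**
** PCX machines cannot create uncached mappings, so pcx_dma_alloc() below
** only honours DMA_ATTR_NON_CONSISTENT requests; such buffers stay
** cacheable and the driver must call dma_cache_sync() around device
** accesses.
*/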
static void *pcx_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	void *addr;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
		return NULL;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t iova, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

const struct dma_map_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc =		pcx_dma_alloc,
	.free =			pcx_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
	.cache_sync =		pa11_dma_cache_sync,
};
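
/*
** Which ops table a device actually uses is decided at boot by platform
** setup code outside this file: per the header comment, PA7100LC/PA7300LC
** ("pcxl") machines can take pcxl_dma_ops, while other PA 1.1 CPUs get
** pcx_dma_ops.  This is inferred from the two exported tables above; the
** selection logic itself lives elsewhere in arch/parisc.
*/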