#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>
#include <linux/dma-debug.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

#define DMA_ERROR_CODE 0

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);
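
/*
 * Sketch (assumed wiring, not spelled out in this header): a platform's
 * struct dma_map_ops can route its sync callbacks through the machvec
 * helpers declared above, e.g. (hypothetical function name):
 *
 *	static void my_sync_single_for_cpu(struct device *dev,
 *					   dma_addr_t addr, size_t size,
 *					   enum dma_data_direction dir)
 *	{
 *		machvec_dma_sync_single(dev, addr, size, dir);
 *	}
 */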

#define get_dma_ops(dev) platform_dma_get_ops(dev)
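
/*
 * Usage sketch (illustrative, not defined here): the generic DMA layer
 * looks up the per-device operations via get_dma_ops() and dispatches
 * through them indirectly, e.g.:
 *
 *	struct dma_map_ops *ops = get_dma_ops(dev);
 *	dma_addr_t addr = ops->map_page(dev, page, offset, size, dir, NULL);
 */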

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}
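
/*
 * Usage sketch (hypothetical caller, not part of this header): before
 * handing a buffer to a device, a mapping helper can check that the bus
 * address range fits under the device's DMA mask:
 *
 *	dma_addr_t addr = phys_to_dma(dev, virt_to_phys(buf));
 *
 *	if (!dma_capable(dev, addr, size))
 *		return DMA_ERROR_CODE;
 */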

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
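
/*
 * On ia64 the bus address space is identity-mapped onto the CPU physical
 * address space (both conversions above return their argument), so the
 * two helpers are exact inverses:
 *
 *	dma_to_phys(dev, phys_to_dma(dev, paddr)) == paddr
 */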

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}
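
/*
 * Usage sketch (hypothetical driver, shown for illustration): after the
 * CPU writes a descriptor that the device will read, a driver orders the
 * writes before kicking the hardware:
 *
 *	desc->cmd = CMD_START;				(hypothetical field)
 *	dma_cache_sync(dev, desc, sizeof(*desc), DMA_TO_DEVICE);
 *	writel(1, doorbell);				(device may now read)
 */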

#endif /* _ASM_IA64_DMA_MAPPING_H */