#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

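/*
 * Table of DMA primitives supplied by the active DMA backend (swiotlb, an
 * IOMMU driver, or a machine-vector specific implementation).  The generic
 * dma_* wrappers below reach the backend through the machine-vector
 * platform_dma_* hooks; dma_ops points at the table currently in use.
 */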
struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, unsigned long ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported_op)(struct device *hwdev, u64 mask);
	int		is_phys;
};
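
/*
 * A backend typically fills in a static instance of the table and points
 * dma_ops at it.  Illustrative sketch only -- the example_* names below
 * are hypothetical, not functions provided by this header:
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent		= example_alloc_coherent,
 *		.free_coherent		= example_free_coherent,
 *		.map_single		= example_map_single,
 *		.unmap_single		= example_unmap_single,
 *		.map_sg			= example_map_sg,
 *		.unmap_sg		= example_unmap_sg,
 *		.mapping_error		= example_mapping_error,
 *		.dma_supported_op	= example_dma_supported,
 *		.is_phys		= 0,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */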

extern struct dma_mapping_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

#define dma_alloc_coherent(dev, size, handle, gfp)	\
	platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA)
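
/*
 * GFP_DMA is OR'd in unconditionally so that coherent allocations come from
 * the DMA-addressable zone; platform_dma_alloc_coherent expands through the
 * machine vector to the active backend.  Typical driver use (sketch only,
 * ring/ring_dma/RING_BYTES are illustrative names):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 */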

/* coherent memory is as cheap as any other on ia64, so the noncoherent
   variants simply use the coherent path */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#define dma_free_coherent	platform_dma_free_coherent
static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
#define dma_map_single_attrs	platform_dma_map_single_attrs
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size, int dir)
{
	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
}
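
/*
 * The *_attrs variants take an optional struct dma_attrs *; these wrappers
 * pass NULL for the common case.  Typical streaming-DMA usage (sketch only,
 * buf/len are illustrative):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... hand "handle" to the device and wait for it to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */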
#define dma_map_sg_attrs	platform_dma_map_sg_attrs
static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, int dir)
{
	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
				    size_t size, int dir)
{
	dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				int nents, int dir)
{
	dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
#define dma_mapping_error	platform_dma_mapping_error

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)
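
/*
 * Page mappings are layered on dma_map_single() via page_address(): ia64 has
 * no highmem, so every page is expected to have a kernel virtual mapping.
 */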

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)
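/*
 * Note that the ranged variants above drop the offset and simply forward
 * size to the whole-mapping sync hooks; on ia64 the platform sync routines
 * mainly enforce ordering, so no finer granularity is needed here.
 */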

#define dma_supported		platform_dma_supported

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
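
/*
 * dma_set_mask() only updates the mask when the platform can actually
 * address it.  Typical probe-time fallback (sketch, not part of this file):
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */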

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do
	 * need to ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	return dma_ops;
}
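
/*
 * get_dma_ops() ignores the device argument and returns the single global
 * table; callers needing a backend hook directly can use, for example,
 * get_dma_ops(dev)->dma_supported_op(dev, mask).
 */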

#endif /* _ASM_IA64_DMA_MAPPING_H */