#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>
#include <asm/swiotlb.h>

#define ARCH_HAS_DMA_GET_REQUIRED_MASK

extern struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);

extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
				    enum dma_data_direction);
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
				enum dma_data_direction);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *daddr, gfp_t gfp)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->alloc_coherent(dev, size, daddr, gfp);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *caddr, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->free_coherent(dev, size, caddr, daddr);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *caddr, size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, virt_to_page(caddr),
			     (unsigned long)caddr & ~PAGE_MASK, size,
			     dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_page(dev, daddr, size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_sg(dev, sgl, nents, dir, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sgl, int nents,
				      enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->unmap_sg(dev, sgl, nents, dir, attrs);
}

#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_cpu(dev, daddr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sgl,
				       int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_cpu(dev, sgl, nents, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t daddr,
					      size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_single_for_device(dev, daddr, size, dir);
}
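
/*
 * Illustrative sketch (not part of the original header): a driver would
 * typically drive the streaming API above as follows.  "dev", "buf" and
 * "len" are hypothetical names for the caller's device and buffer.
 *
 *	dma_addr_t daddr;
 *
 *	daddr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, daddr))
 *		return -ENOMEM;
 *	... device DMAs into buf ...
 *	dma_sync_single_for_cpu(dev, daddr, len, DMA_FROM_DEVICE);
 *	... CPU may now read buf ...
 *	dma_unmap_single(dev, daddr, len, DMA_FROM_DEVICE);
 */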
static inline void
dma_sync_sg_for_device(struct device *dev,
		       struct scatterlist *sgl,
		       int nents,
		       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	ops->sync_sg_for_device(dev, sgl, nents, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->mapping_error(dev, daddr);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = platform_dma_get_ops(dev);
	return ops->dma_supported(dev, mask);
}

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */
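
/*
 * Illustrative sketch (not part of the original header): coherent
 * allocations at probe time combine dma_set_mask() with
 * dma_alloc_coherent()/dma_free_coherent() roughly like this.  "dev",
 * "ring" and RING_BYTES are hypothetical names.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device; CPU and device share "ring" ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */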