/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
 *
 * Implements the generic device dma API via the existing pci_ one
 * for unconverted architectures
 */

#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/config.h>

#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}


#else

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG();
	return 0;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG();
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

#endif

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d)	(1)

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* could define this in terms of the dma_cache ... operations,
	 * but if you get this on a platform, you should convert the platform
	 * to using the generic device DMA API */
	BUG();
}

#endif
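
/*
 * Illustrative usage sketch (not part of the header proper): driver code
 * written against the generic dma_* API above is simply routed to the
 * corresponding pci_* routine when CONFIG_PCI is set.  The pci_dev 'pdev',
 * the buffer and BUF_LEN below are hypothetical.
 *
 *	void *buf = kmalloc(BUF_LEN, GFP_KERNEL);	// hypothetical buffer
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(&pdev->dev, buf, BUF_LEN, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -EIO;
 *	// ... hand 'bus' to the hardware and run the transfer ...
 *	dma_unmap_single(&pdev->dev, bus, BUF_LEN, DMA_TO_DEVICE);
 */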