#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
				dma_addr_t dma_addr);
	void*		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif /* CONFIG_X86_32 */
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}
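
/*
 * A minimal streaming-DMA sketch for the helpers above.  The device
 * pointer 'mydev', the kernel-virtual buffer 'buf' and its length 'len'
 * are hypothetical placeholders, not symbols defined in this header:
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(mydev, bus))
 *		return -ENOMEM;
 *
 * Only a handle that passed dma_mapping_error() may be programmed into
 * the hardware; once the transfer has completed, release it with the
 * same size and direction:
 *
 *	dma_unmap_single(mydev, bus, len, DMA_TO_DEVICE);
 */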

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}
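
/*
 * A scatter-gather sketch along the same lines.  'mydev', the scatterlist
 * 'sgl' with 'nents' entries, and the helper program_hw_descriptor() are
 * hypothetical; a real driver treats a zero return from dma_map_sg() as
 * failure and walks only the returned number of entries:
 *
 *	int i, mapped;
 *	struct scatterlist *sg;
 *
 *	mapped = dma_map_sg(mydev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		goto fail;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *
 * If the CPU must inspect the data while the mapping stays live, sync it
 * back first; when the transfer is completely finished, unmap with the
 * original 'nents', not the 'mapped' count:
 *
 *	dma_sync_sg_for_cpu(mydev, sgl, nents, DMA_FROM_DEVICE);
 *	dma_unmap_sg(mydev, sgl, nents, DMA_FROM_DEVICE);
 */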

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */