#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#include <linux/scatterlist.h>

/*
 * These operations all dispatch through the per-platform machine vector
 * (machvec); the platform_* symbols resolve to the implementation for the
 * platform the kernel was configured for.
 */
#define dma_alloc_coherent	platform_dma_alloc_coherent
/* coherent mem. is cheap */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#define dma_free_coherent	platform_dma_free_coherent
static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
#define dma_map_single_attrs	platform_dma_map_single_attrs
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size, int dir)
{
	return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_map_sg_attrs	platform_dma_map_sg_attrs
static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, int dir)
{
	return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_unmap_single_attrs	platform_dma_unmap_single_attrs
static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
				    size_t size, int dir)
{
	dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
}
#define dma_unmap_sg_attrs	platform_dma_unmap_sg_attrs
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				int nents, int dir)
{
	dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
}
#define dma_sync_single_for_cpu		platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu		platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device	platform_dma_sync_single_for_device
#define dma_sync_sg_for_device		platform_dma_sync_sg_for_device
#define dma_mapping_error		platform_dma_mapping_error

#define dma_map_page(dev, pg, off, size, dir)				\
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)			\
	dma_unmap_single(dev, dma_addr, size, dir)

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own
 * risk.  See Documentation/DMA-API.txt for details.
 */

/* The range variants discard the offset and sync the whole mapping. */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
	dma_sync_single_for_device(dev, dma_handle, size, dir)

#define dma_supported		platform_dma_supported

static inline int
dma_set_mask (struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync (struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However,
	 * we do need to ensure that dma_cache_sync() enforces order,
	 * hence the mb().
	 */
	mb();
}

#define dma_is_consistent(d, h)	(1)	/* all we do is coherent memory... */

#endif /* _ASM_IA64_DMA_MAPPING_H */
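
/*
 * Usage sketch (illustrative only, hence the "#if 0"): a minimal example of
 * how a driver might use the streaming helpers above.  The function name
 * example_start_tx, the buffer, and the length are assumptions made up for
 * this sketch.  dma_mapping_error() is shown in the single-argument form
 * used in this header's era; its signature differs on later kernels.
 */
#if 0
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map the CPU buffer so the device can read it (CPU -> device). */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -ENOMEM;

	/* ... program the device with "handle" and start the transfer ... */

	/* Tear the mapping down once the device is done with the buffer. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif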