/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "exec/memory.h"
#include "hw/hw.h"
#include "block/block.h"
#include "sysemu/kvm.h"

/* Forward declarations so pointer members below can refer to these
 * before their full definitions appear. */
typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

/* Direction of a transfer, named from the device's point of view:
 * TO_DEVICE reads guest memory into the device, FROM_DEVICE writes
 * device data out to guest memory. */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

/* A scatter/gather list: a sequence of bus-address ranges that together
 * make up one logical transfer. */
struct QEMUSGList {
    ScatterGatherEntry *sg;  /* entry array */
    int nsg;                 /* entries currently in use */
    int nalloc;              /* entries allocated in sg */
    size_t size;             /* total byte length of all entries */
    DMAContext *dma;         /* context the bus addresses belong to */
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
/* NOTE(review): PRIx64 needs <inttypes.h>; presumably pulled in via one
 * of the includes above -- confirm it is not relied on implicitly. */
#define DMA_ADDR_FMT "%" PRIx64

/* Hook types an IOMMU/bus implementation installs in a DMAContext. */

/* Translate bus address @addr to a CPU physical address in *paddr,
 * clamping *len to the contiguous region; int result is the error
 * status (presumably 0 on success -- confirm against implementations). */
typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             hwaddr *paddr,
                             hwaddr *len,
                             DMADirection dir);
/* Map @addr for direct host access; may shorten *len. */
typedef void* DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
/* Release a mapping; @access_len is how much was actually accessed. */
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

/* Per-bus DMA state: the target AddressSpace plus optional IOMMU hooks.
 * All hooks NULL means direct (untranslated) access -- see
 * dma_has_iommu(). */
struct DMAContext {
    AddressSpace *as;
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

/* A global DMA context corresponding to the address_space_memory
 * AddressSpace, for sysbus devices which do DMA.
 */
extern DMAContext dma_context_memory;

static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

/* True iff @dma provides address translation: a non-NULL context with a
 * translate hook installed.  A NULL context counts as "no IOMMU". */
static inline bool dma_has_iommu(DMAContext *dma)
{
    return dma && dma->translate;
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* NOTE(review): dma_has_iommu() is false for dma == NULL, yet
         * this path dereferences dma->as; callers are apparently
         * expected to always pass a real context (e.g.
         * &dma_context_memory) -- confirm. */
        return address_space_access_valid(dma->as, addr, len,
                                          dir == DMA_DIRECTION_FROM_DEVICE);
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
/* Copy @len bytes between @buf and guest memory at @addr WITHOUT the
 * leading dma_barrier(); returns 0 on the no-IOMMU fast path, otherwise
 * whatever iommu_dma_memory_rw() reports. */
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        address_space_rw(dma->as, addr, buf, len,
                         dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return
iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

/* Read guest memory into @buf without a memory barrier. */
static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

/* Write @buf to guest memory without a memory barrier.  The const is
 * cast away only so dma_memory_rw_relaxed() can be shared; the buffer
 * is only read on this path. */
static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

/* Barrier-then-copy: the ordered variant device models normally use. */
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

/* Fill @len bytes of guest memory at @addr with byte @c. */
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
/* Map guest memory for direct host access.  On return *len holds the
 * length actually mapped, which may be shorter than requested; release
 * the mapping with dma_memory_unmap(). */
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* hwaddr and dma_addr_t may differ in width, so bounce the
         * length through a local for address_space_map(). */
        hwaddr xlen = *len;
        void *p;

        p = address_space_map(dma->as, addr, &xlen,
                              dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
/* Undo dma_memory_map(); @access_len is how many bytes were actually
 * touched through the mapping. */
static inline void dma_memory_unmap(DMAContext
*dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        address_space_unmap(dma->as, buffer, (hwaddr)len,
                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}

/* Generate fixed-width load/store helpers such as lduw_le_dma() and
 * stw_be_dma().  _end selects the guest byte order (le/be); values are
 * swapped to/from host order around dma_memory_read()/write(). */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end)                    \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    {                                                                   \
        uint##_bits##_t val;                                            \
        dma_memory_read(dma, addr, &val, (_bits) / 8);                  \
        return _end##_bits##_to_cpu(val);                               \
    }                                                                   \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma,       \
                                                 dma_addr_t addr,       \
                                                 uint##_bits##_t val)   \
    {                                                                   \
        val = cpu_to_##_end##_bits(val);                                \
        dma_memory_write(dma, addr, &val, (_bits) / 8);                 \
    }

/* Single bytes have no endianness, so the 8-bit pair is spelled out. */
static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA

/* Initialize @dma to use @as, with optional IOMMU hooks (NULL hooks
 * mean direct access). */
void dma_context_init(DMAContext *dma, AddressSpace *as,
                      DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);

/* One contiguous bus-address range within a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif

/* Async block-I/O entry point type taken by dma_bdrv_io(); selects the
 * underlying read or write helper. */
typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                    QEMUIOVector *iov, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);
/*
 * Helpers that run async block I/O directly to/from a scatter/gather
 * list, and that copy between a linear buffer and one.  Implemented in
 * dma-helpers.c.
 */
BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
/* Copy between linear buffer @ptr and @sg; the return value is the
 * byte count actually transferred -- presumably min(len, sg->size);
 * confirm against the implementation. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Start block-device accounting for a scatter/gather transfer. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif