xref: /openbmc/qemu/include/sysemu/dma.h (revision b278b60d)
1 /*
2  * DMA helper functions
3  *
4  * Copyright (c) 2009, 2020 Red Hat
5  *
6  * This work is licensed under the terms of the GNU General Public License
7  * (GNU GPL), version 2 or later.
8  */
9 
10 #ifndef DMA_H
11 #define DMA_H
12 
13 #include "exec/memory.h"
14 #include "exec/address-spaces.h"
15 #include "block/block.h"
16 #include "block/accounting.h"
17 
/* Direction of a DMA transfer, seen from the device's point of view. */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,   /* device reads from guest memory */
    DMA_DIRECTION_FROM_DEVICE = 1, /* device writes to guest memory */
} DMADirection;

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64            /* width of dma_addr_t, in bits */
#define DMA_ADDR_FMT "%" PRIx64     /* printf conversion for dma_addr_t */
34 
typedef struct ScatterGatherEntry ScatterGatherEntry;

/*
 * A scatter/gather list of DMA address ranges; built with
 * qemu_sglist_init()/qemu_sglist_add() and released with
 * qemu_sglist_destroy() (declared below).
 */
struct QEMUSGList {
    ScatterGatherEntry *sg;  /* array of entries */
    int nsg;                 /* number of entries in use */
    int nalloc;              /* number of entries allocated */
    dma_addr_t size;         /* cumulative length of the entries, in bytes */
    DeviceState *dev;        /* device owning the list */
    AddressSpace *as;        /* address space the entries refer to */
};
45 
/**
 * dma_barrier: Order DMA accesses with respect to concurrently running VCPUs.
 *
 * @as: #AddressSpace the access targets (currently unused here)
 * @dir: direction of the upcoming transfer (currently unused here)
 */
static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    smp_mb();
}
68 
69 /* Checks that the given range of addresses is valid for DMA.  This is
70  * useful for certain cases, but usually you should just use
71  * dma_memory_{read,write}() and check for errors */
72 static inline bool dma_memory_valid(AddressSpace *as,
73                                     dma_addr_t addr, dma_addr_t len,
74                                     DMADirection dir, MemTxAttrs attrs)
75 {
76     return address_space_access_valid(as, addr, len,
77                                       dir == DMA_DIRECTION_FROM_DEVICE,
78                                       attrs);
79 }
80 
81 static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
82                                                 dma_addr_t addr,
83                                                 void *buf, dma_addr_t len,
84                                                 DMADirection dir,
85                                                 MemTxAttrs attrs)
86 {
87     return address_space_rw(as, addr, attrs,
88                             buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
89 }
90 
91 static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
92                                                   dma_addr_t addr,
93                                                   void *buf, dma_addr_t len)
94 {
95     return dma_memory_rw_relaxed(as, addr, buf, len,
96                                  DMA_DIRECTION_TO_DEVICE,
97                                  MEMTXATTRS_UNSPECIFIED);
98 }
99 
100 static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
101                                                    dma_addr_t addr,
102                                                    const void *buf,
103                                                    dma_addr_t len)
104 {
105     return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
106                                  DMA_DIRECTION_FROM_DEVICE,
107                                  MEMTXATTRS_UNSPECIFIED);
108 }
109 
110 /**
111  * dma_memory_rw: Read from or write to an address space from DMA controller.
112  *
113  * Return a MemTxResult indicating whether the operation succeeded
114  * or failed (eg unassigned memory, device rejected the transaction,
115  * IOMMU fault).
116  *
117  * @as: #AddressSpace to be accessed
118  * @addr: address within that address space
119  * @buf: buffer with the data transferred
120  * @len: the number of bytes to read or write
121  * @dir: indicates the transfer direction
122  * @attrs: memory transaction attributes
123  */
124 static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
125                                         void *buf, dma_addr_t len,
126                                         DMADirection dir, MemTxAttrs attrs)
127 {
128     dma_barrier(as, dir);
129 
130     return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
131 }
132 
133 /**
134  * dma_memory_read: Read from an address space from DMA controller.
135  *
136  * Return a MemTxResult indicating whether the operation succeeded
137  * or failed (eg unassigned memory, device rejected the transaction,
138  * IOMMU fault).  Called within RCU critical section.
139  *
140  * @as: #AddressSpace to be accessed
141  * @addr: address within that address space
142  * @buf: buffer with the data transferred
143  * @len: length of the data transferred
144  * @attrs: memory transaction attributes
145  */
146 static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
147                                           void *buf, dma_addr_t len,
148                                           MemTxAttrs attrs)
149 {
150     return dma_memory_rw(as, addr, buf, len,
151                          DMA_DIRECTION_TO_DEVICE, attrs);
152 }
153 
154 /**
155  * dma_memory_write: Write to address space from DMA controller.
156  *
157  * Return a MemTxResult indicating whether the operation succeeded
158  * or failed (eg unassigned memory, device rejected the transaction,
159  * IOMMU fault).
160  *
161  * @as: #AddressSpace to be accessed
162  * @addr: address within that address space
163  * @buf: buffer with the data transferred
164  * @len: the number of bytes to write
165  * @attrs: memory transaction attributes
166  */
167 static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
168                                            const void *buf, dma_addr_t len,
169                                            MemTxAttrs attrs)
170 {
171     return dma_memory_rw(as, addr, (void *)buf, len,
172                          DMA_DIRECTION_FROM_DEVICE, attrs);
173 }
174 
/**
 * dma_memory_set: Fill memory with a constant byte from DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs);
190 
191 /**
192  * dma_memory_map: Map a physical memory region into a host virtual address.
193  *
194  * May map a subset of the requested range, given by and returned in @plen.
195  * May return %NULL and set *@plen to zero(0), if resources needed to perform
196  * the mapping are exhausted.
197  * Use only for reads OR writes - not for read-modify-write operations.
198  *
199  * @as: #AddressSpace to be accessed
200  * @addr: address within that address space
201  * @len: pointer to length of buffer; updated on return
202  * @dir: indicates the transfer direction
203  * @attrs: memory attributes
204  */
205 static inline void *dma_memory_map(AddressSpace *as,
206                                    dma_addr_t addr, dma_addr_t *len,
207                                    DMADirection dir, MemTxAttrs attrs)
208 {
209     hwaddr xlen = *len;
210     void *p;
211 
212     p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
213                           attrs);
214     *len = xlen;
215     return p;
216 }
217 
218 /**
219  * dma_memory_unmap: Unmaps a memory region previously mapped by dma_memory_map()
220  *
221  * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
222  * @access_len gives the amount of memory that was actually read or written
223  * by the caller.
224  *
225  * @as: #AddressSpace used
226  * @buffer: host pointer as returned by dma_memory_map()
227  * @len: buffer length as returned by dma_memory_map()
228  * @dir: indicates the transfer direction
229  * @access_len: amount of data actually transferred
230  */
231 static inline void dma_memory_unmap(AddressSpace *as,
232                                     void *buffer, dma_addr_t len,
233                                     DMADirection dir, dma_addr_t access_len)
234 {
235     address_space_unmap(as, buffer, (hwaddr)len,
236                         dir == DMA_DIRECTION_FROM_DEVICE, access_len);
237 }
238 
/*
 * DEFINE_LDST_DMA: Generate a pair of endian-aware DMA accessors,
 * ld<_lname>_<_end>_dma() and st<_sname>_<_end>_dma(), for a _bits-wide
 * integer.  The load helper byteswaps the value read from guest memory
 * into host CPU order; the store helper converts the host value to the
 * _end endianness before writing.  Both return the MemTxResult of the
 * underlying dma_memory_{read,write}().
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t *pval, \
                                                        MemTxAttrs attrs) \
    { \
        MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
        _end##_bits##_to_cpus(pval); \
        return res; \
    } \
    static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t val, \
                                                        MemTxAttrs attrs) \
    { \
        val = cpu_to_##_end##_bits(val); \
        return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
    }
257 
258 static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
259                                    uint8_t *val, MemTxAttrs attrs)
260 {
261     return dma_memory_read(as, addr, val, 1, attrs);
262 }
263 
264 static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
265                                   uint8_t val, MemTxAttrs attrs)
266 {
267     return dma_memory_write(as, addr, &val, 1, attrs);
268 }
269 
/*
 * Instantiate lduw/ldl/ldq and stw/stl/stq accessors for both
 * little-endian (_le_dma) and big-endian (_be_dma) layouts.
 */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
278 
/* One contiguous DMA address range within a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base; /* start address of the range */
    dma_addr_t len;  /* length of the range, in bytes */
};
283 
/* Initialize @qsg for @dev/@as; @alloc_hint sizes the initial entry array. */
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
/* Append the range [@base, @base + @len) to @qsg. */
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
/* Release the storage owned by @qsg. */
void qemu_sglist_destroy(QEMUSGList *qsg);
288 
/*
 * Callback type used by dma_blk_io() to issue the actual I/O for each
 * mapped portion of the scatter/gather list.
 */
typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                              BlockCompletionFunc *cb, void *cb_opaque,
                              void *opaque);

/*
 * Asynchronous I/O through a scatter/gather list: @io_func is invoked with
 * @io_func_opaque for each chunk, and @cb(@opaque, ...) on completion.
 */
BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
/* Convenience wrappers around dma_blk_io() for reads/writes on @blk. */
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
/*
 * Copy between the linear buffer @ptr (up to @len bytes) and @sg;
 * *@residual presumably receives the byte count not transferred —
 * verify against softmmu/dma-helpers.c.
 */
MemTxResult dma_buf_read(void *ptr, dma_addr_t len, dma_addr_t *residual,
                         QEMUSGList *sg, MemTxAttrs attrs);
MemTxResult dma_buf_write(void *ptr, dma_addr_t len, dma_addr_t *residual,
                          QEMUSGList *sg, MemTxAttrs attrs);

/* Start block-accounting of @type for the @sg-sized transfer on @blk. */
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);
310 
/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less than or equal to @end - @start + 1, aligned with
 * @start, and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);
322 
323 #endif
324