xref: /openbmc/qemu/include/sysemu/dma.h (revision 0fbb5d2d)
/*
 * DMA helper functions
 *
 * Copyright (c) 2009, 2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "block/block.h"
#include "block/accounting.h"

typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DeviceState *dev;
    AddressSpace *as;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

static inline void dma_barrier(AddressSpace *as, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    smp_mb();
}
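
/*
 * Illustrative sketch (not part of the API): a device model that fills a
 * completion record with the _relaxed helpers and then publishes a status
 * word still gets data-before-status ordering by using the plain variant
 * for the final write, since dma_memory_write() calls dma_barrier() before
 * the access.  "rec", "rec_addr", "status" and "status_addr" below are
 * hypothetical names.
 *
 *     dma_memory_write_relaxed(as, rec_addr, &rec, sizeof(rec));
 *     dma_memory_write(as, status_addr, &status, sizeof(status),
 *                      MEMTXATTRS_UNSPECIFIED);
 */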

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
static inline bool dma_memory_valid(AddressSpace *as,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir, MemTxAttrs attrs)
{
    return address_space_access_valid(as, addr, len,
                                      dir == DMA_DIRECTION_FROM_DEVICE,
                                      attrs);
}
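
/*
 * Usage sketch (illustrative only): a device model might validate a
 * guest-programmed buffer up front before committing to a long transfer.
 * "buf_addr" and "buf_len" are hypothetical device registers.
 *
 *     if (!dma_memory_valid(as, buf_addr, buf_len,
 *                           DMA_DIRECTION_FROM_DEVICE,
 *                           MEMTXATTRS_UNSPECIFIED)) {
 *         ...raise a device-specific DMA error instead of transferring...
 *     }
 */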

static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as,
                                                dma_addr_t addr,
                                                void *buf, dma_addr_t len,
                                                DMADirection dir,
                                                MemTxAttrs attrs)
{
    return address_space_rw(as, addr, attrs,
                            buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
}

static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as,
                                                  dma_addr_t addr,
                                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, buf, len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
}

static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as,
                                                   dma_addr_t addr,
                                                   const void *buf,
                                                   dma_addr_t len)
{
    return dma_memory_rw_relaxed(as, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
}

/**
 * dma_memory_rw: Read from or write to an address space from a DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @dir: indicates the transfer direction
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir, MemTxAttrs attrs)
{
    dma_barrier(as, dir);

    return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs);
}
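
/*
 * Illustrative sketch (not part of the header): the direction is named from
 * the device's point of view, so a controller emulation that both fetches
 * and posts data through one helper might do:
 *
 *     MemTxResult res = dma_memory_rw(as, addr, buf, len,
 *                                     is_write ? DMA_DIRECTION_FROM_DEVICE
 *                                              : DMA_DIRECTION_TO_DEVICE,
 *                                     MEMTXATTRS_UNSPECIFIED);
 *
 * "is_write", "buf" and "len" are hypothetical locals of the caller.
 */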

/**
 * dma_memory_read: Read from an address space from a DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within an RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr,
                                          void *buf, dma_addr_t len,
                                          MemTxAttrs attrs)
{
    return dma_memory_rw(as, addr, buf, len,
                         DMA_DIRECTION_TO_DEVICE, attrs);
}
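
/*
 * Usage sketch (illustrative only): reading a guest-resident descriptor
 * into a device model.  "XYZDescriptor" and "desc_addr" are hypothetical.
 *
 *     XYZDescriptor desc;
 *
 *     if (dma_memory_read(as, desc_addr, &desc, sizeof(desc),
 *                         MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         ...report a DMA error and do not use desc...
 *     }
 */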

/**
 * dma_memory_write: Write to an address space from a DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 * @attrs: memory transaction attributes
 */
static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr,
                                           const void *buf, dma_addr_t len,
                                           MemTxAttrs attrs)
{
    return dma_memory_rw(as, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE, attrs);
}
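
/*
 * Usage sketch (illustrative only): posting a completion record back to
 * guest memory.  "comp" and "comp_addr" are hypothetical.
 *
 *     if (dma_memory_write(as, comp_addr, &comp, sizeof(comp),
 *                          MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         ...flag the request as failed...
 *     }
 */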

/**
 * dma_memory_set: Fill memory with a constant byte from a DMA controller.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @c: constant byte to fill the memory
 * @len: the number of bytes to fill with the constant byte
 * @attrs: memory transaction attributes
 */
MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr,
                           uint8_t c, dma_addr_t len, MemTxAttrs attrs);
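
/*
 * Usage sketch (illustrative only): clearing a guest-visible status area,
 * e.g. on device reset.  "status_base" and "status_size" are hypothetical.
 *
 *     if (dma_memory_set(as, status_base, 0, status_size,
 *                        MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
 *         ...the area was not (fully) cleared...
 *     }
 */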

/**
 * dma_memory_map: Map a physical memory region into a host virtual address.
 *
 * May map a subset of the requested range, given by and returned in @len.
 * May return %NULL and set *@len to zero, if resources needed to perform
 * the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: pointer to length of buffer; updated on return
 * @dir: indicates the transfer direction
 * @attrs: memory attributes
 */
static inline void *dma_memory_map(AddressSpace *as,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir, MemTxAttrs attrs)
{
    hwaddr xlen = *len;
    void *p;

    p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE,
                          attrs);
    *len = xlen;
    return p;
}

/**
 * dma_memory_unmap: Unmap a memory region previously mapped
 *                   by dma_memory_map()
 *
 * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
 * @access_len gives the amount of memory that was actually read or written
 * by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by dma_memory_map()
 * @len: buffer length as returned by dma_memory_map()
 * @dir: indicates the transfer direction
 * @access_len: amount of data actually transferred
 */
static inline void dma_memory_unmap(AddressSpace *as,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    address_space_unmap(as, buffer, (hwaddr)len,
                        dir == DMA_DIRECTION_FROM_DEVICE, access_len);
}
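
/*
 * Usage sketch (illustrative only): mapping a guest buffer for direct
 * access, remembering that less than the requested length may be mapped.
 * "guest_addr" and "want" are hypothetical.
 *
 *     dma_addr_t mapped = want;
 *     void *p = dma_memory_map(as, guest_addr, &mapped,
 *                              DMA_DIRECTION_FROM_DEVICE,
 *                              MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         ...write up to "mapped" bytes at p...
 *         dma_memory_unmap(as, p, mapped,
 *                          DMA_DIRECTION_FROM_DEVICE, mapped);
 *     } else {
 *         ...fall back to dma_memory_write() or fail the request...
 *     }
 */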

/*
 * Fixed-size, endian-aware DMA accessors: DEFINE_LDST_DMA() expands into
 * ld{uw,l,q}_{le,be}_dma() and st{w,l,q}_{le,be}_dma() helpers.  The loads
 * convert the value to host byte order and the stores convert it to the
 * named endianness; the 8-bit ldub_dma()/stb_dma() helpers below need no
 * byte swapping.
 */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t *pval, \
                                                        MemTxAttrs attrs) \
    { \
        MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \
        _end##_bits##_to_cpus(pval); \
        return res; \
    } \
    static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \
                                                        dma_addr_t addr, \
                                                        uint##_bits##_t val, \
                                                        MemTxAttrs attrs) \
    { \
        val = cpu_to_##_end##_bits(val); \
        return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \
    }

static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr,
                                   uint8_t *val, MemTxAttrs attrs)
{
    return dma_memory_read(as, addr, val, 1, attrs);
}

static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr,
                                  uint8_t val, MemTxAttrs attrs)
{
    return dma_memory_write(as, addr, &val, 1, attrs);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
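
/*
 * Usage sketch (illustrative only): reading a 32-bit little-endian field
 * and updating a 16-bit one with the generated accessors.  "lo32_addr" and
 * "flags_addr" are hypothetical.
 *
 *     uint32_t lo32;
 *
 *     if (ldl_le_dma(as, lo32_addr, &lo32, MEMTXATTRS_UNSPECIFIED) == MEMTX_OK) {
 *         stw_le_dma(as, flags_addr, 0x0001, MEMTXATTRS_UNSPECIFIED);
 *     }
 */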

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
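
/*
 * Usage sketch (illustrative only): building a scatter-gather list from a
 * guest-programmed descriptor ring before handing it to dma_blk_read() or
 * a similar helper.  "dev", "nb_desc" and "desc[i]" are hypothetical.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, dev, nb_desc, as);
 *     for (i = 0; i < nb_desc; i++) {
 *         qemu_sglist_add(&qsg, desc[i].addr, desc[i].len);
 *     }
 *     ...issue the transfer...
 *     qemu_sglist_destroy(&qsg);
 */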

#endif /* !CONFIG_USER_ONLY */

typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
                              BlockCompletionFunc *cb, void *cb_opaque,
                              void *opaque);

BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb, void *opaque, DMADirection dir);
BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          BlockCompletionFunc *cb, void *opaque);
uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);
uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs);
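
/*
 * Usage sketch (illustrative only): starting an asynchronous transfer from
 * a block backend into the guest memory described by a QEMUSGList.  "r" is
 * a hypothetical request structure and "xyz_dma_complete" a hypothetical
 * BlockCompletionFunc; the byte offset and alignment follow the usual
 * sector-based convention.
 *
 *     r->aiocb = dma_blk_read(blk, &r->qsg, r->sector * BDRV_SECTOR_SIZE,
 *                             BDRV_SECTOR_SIZE, xyz_dma_complete, r);
 */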

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less than or equal to @end - @start + 1, aligned with
 * @start, and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);
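
/*
 * Worked example (illustrative only): for start = 0x1000 and end = 0x2fff
 * the range spans 0x2000 bytes, but the largest naturally aligned power-of-2
 * chunk starting at 0x1000 is 4 KiB, so the returned mask is 0xfff.
 */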

#endif /* DMA_H */