xref: /openbmc/qemu/hw/dma/xlnx-zdma.c (revision 8e6fe6b8)
/*
 * QEMU model of the ZynqMP generic DMA
 *
 * Copyright (c) 2014 Xilinx Inc.
 * Copyright (c) 2018 FEIMTECH AB
 *
 * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
 *            Francisco Iglesias <francisco.iglesias@feimtech.se>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/dma/xlnx-zdma.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"

#ifndef XLNX_ZDMA_ERR_DEBUG
#define XLNX_ZDMA_ERR_DEBUG 0
#endif

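/*
 * Register and field layout. The REG32() and FIELD() macros (from QEMU's
 * hw/registerfields.h, pulled in via the zdma header) expand into
 * R_<name> (word index), A_<name> (byte offset) and per-field
 * shift/length/mask constants, which the rest of the model accesses
 * through FIELD_EX32/DP32 and ARRAY_FIELD_EX32/DP32. The offsets and
 * widths below are intended to mirror the ZynqMP ZDMA register map.
 */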
REG32(ZDMA_ERR_CTRL, 0x0)
    FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
REG32(ZDMA_CH_ISR, 0x100)
    FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
REG32(ZDMA_CH_IMR, 0x104)
    FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
REG32(ZDMA_CH_IEN, 0x108)
    FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
REG32(ZDMA_CH_IDS, 0x10c)
    FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
REG32(ZDMA_CH_CTRL0, 0x110)
    FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
    FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
    FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
    FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
    FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
    FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
REG32(ZDMA_CH_CTRL1, 0x114)
    FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
    FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
REG32(ZDMA_CH_FCI, 0x118)
    FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
    FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
    FIELD(ZDMA_CH_FCI, EN, 0, 1)
REG32(ZDMA_CH_STATUS, 0x11c)
    FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
REG32(ZDMA_CH_DATA_ATTR, 0x120)
    FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
    FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
    FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
REG32(ZDMA_CH_DSCR_ATTR, 0x124)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
    FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
    FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
    FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
    FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
    FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
REG32(ZDMA_CH_SRC_START_LSB, 0x158)
REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
    FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_START_LSB, 0x160)
REG32(ZDMA_CH_DST_START_MSB, 0x164)
    FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_RATE_CTRL, 0x18c)
    FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
    FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
    FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_DBG0, 0x198)
    FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
REG32(ZDMA_CH_DBG1, 0x19c)
    FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
REG32(ZDMA_CH_CTRL2, 0x200)
    FIELD(ZDMA_CH_CTRL2, EN, 0, 1)

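/*
 * Encodings used by the control and descriptor fields below:
 * POINT_TYPE selects register-based ("simple") vs. memory descriptors,
 * CMD is the source descriptor command, MODE the read/write mode,
 * TYPE the linear vs. linked-list descriptor layout, and
 * ARBURST/AWBURST the AXI burst type. The numeric values follow how
 * the fields are decoded in this model and are assumed to match the
 * hardware encoding.
 */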
enum {
    PT_REG = 0,
    PT_MEM = 1,
};

enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR  = 1,
};

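/*
 * Interrupt generation: the channel interrupt line is asserted whenever
 * any ZDMA_CH_ISR status bit is set and not masked by the corresponding
 * ZDMA_CH_IMR bit. Writes to the (write-one-to-clear) ISR re-evaluate
 * the line through the post-write hook.
 */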
static void zdma_ch_imr_update_irq(XlnxZDMA *s)
{
    bool pending;

    pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];

    qemu_set_irq(s->irq_zdma_ch_imr, pending);
}

static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    zdma_ch_imr_update_irq(s);
}

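/*
 * ZDMA_CH_IEN and ZDMA_CH_IDS are write-only strobes: writing a 1 clears
 * (IEN) or sets (IDS) the matching ZDMA_CH_IMR mask bit. The pre-write
 * hooks return 0 so no value is latched in the strobe registers
 * themselves.
 */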
static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] &= ~val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] |= val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
{
    s->state = state;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);

    /* Signal error if we have an error condition.  */
    if (s->error) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
    }
}

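/*
 * Descriptor completion accounting: each completed descriptor with the
 * interrupt bit set bumps the 8-bit counter in ZDMA_CH_IRQ_SRC_ACCT or
 * ZDMA_CH_IRQ_DST_ACCT and raises the matching *_DSCR_DONE status bit.
 * If the counter wraps, the corresponding accounting-error interrupt is
 * raised as well.
 */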
static void zdma_src_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);

    /* Did we overflow?  */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

static void zdma_dst_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);

    /* Did we overflow?  */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

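/*
 * 64-bit DMA addresses are split across consecutive LSB/MSB register
 * pairs (e.g. ZDMA_CH_SRC_CUR_DSCR_LSB/_MSB). These helpers read and
 * write such a pair given the register index of the LSB half.
 */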
static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
{
    uint64_t addr;

    addr = s->regs[basereg + 1];
    addr <<= 32;
    addr |= s->regs[basereg];

    return addr;
}

static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
{
    s->regs[basereg] = addr;
    s->regs[basereg + 1] = addr >> 32;
}

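/*
 * Fetch one descriptor from guest memory into *buf. Descriptors must be
 * aligned to their own size; on a misaligned address the buffer is
 * zeroed, the channel error flag is set and false is returned so the
 * caller can raise the matching AXI descriptor-read error interrupt.
 */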
static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
{
    /* ZDMA descriptors must be aligned to their own size.  */
    if (addr % sizeof(XlnxZDMADescr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "zdma: unaligned descriptor at %" PRIx64 "\n",
                      addr);
        memset(buf, 0x0, sizeof(XlnxZDMADescr));
        s->error = true;
        return false;
    }

    address_space_rw(s->dma_as, addr, s->attr,
                     buf, sizeof(XlnxZDMADescr), false);
    return true;
}

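/*
 * Load the current source/destination descriptor. In register ("simple")
 * mode the descriptor is taken directly from the
 * ZDMA_CH_{SRC,DST}_DSCR_WORDx registers; in memory mode it is fetched
 * from the address held in the *_CUR_DSCR register pair, raising
 * AXI_RD_{SRC,DST}_DSCR on failure.
 */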
static void zdma_load_src_descriptor(XlnxZDMA *s)
{
    uint64_t src_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_src, &s->regs[R_ZDMA_CH_SRC_DSCR_WORD0],
               sizeof(s->dsc_src));
        return;
    }

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
    }
}

static void zdma_load_dst_descriptor(XlnxZDMA *s)
{
    uint64_t dst_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_dst, &s->regs[R_ZDMA_CH_DST_DSCR_WORD0],
               sizeof(s->dsc_dst));
        return;
    }

    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
    }
}

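/*
 * Advance the given *_CUR_DSCR register pair to the next descriptor and
 * return its address. Linear descriptors simply follow contiguously;
 * linked-list descriptors carry a 64-bit next pointer right after the
 * descriptor, which is fetched from guest memory (as raw bytes in host
 * byte order).
 */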
static uint64_t zdma_update_descr_addr(XlnxZDMA *s, bool type,
                                       unsigned int basereg)
{
    uint64_t addr, next;

    if (type == DTYPE_LINEAR) {
        next = zdma_get_regaddr64(s, basereg);
        next += sizeof(s->dsc_dst);
        zdma_put_regaddr64(s, basereg, next);
    } else {
        addr = zdma_get_regaddr64(s, basereg);
        addr += sizeof(s->dsc_dst);
        address_space_rw(s->dma_as, addr, s->attr, (void *) &next, 8, false);
        zdma_put_regaddr64(s, basereg, next);
    }
    return next;
}

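/*
 * Drain len bytes from buf into the destination described by the
 * buffered destination descriptor, chaining to the next descriptor
 * whenever the current one is exhausted (memory mode only). FIXED
 * bursts cap each write at one bus width and do not advance the
 * destination address.
 */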
static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr, dst_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               AWBURST);

    /* FIXED burst types are only supported in simple dma mode.  */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              TYPE);
        if (dst_size == 0 && ptype == PT_MEM) {
            uint64_t next;
            next = zdma_update_descr_addr(s, dst_type,
                                          R_ZDMA_CH_DST_CUR_DSCR_LSB);
            zdma_load_descriptor(s, next, &s->dsc_dst);
            dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                                  SIZE);
            dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                                  TYPE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode.  */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              INTR);

        dlen = len > dst_size ? dst_size : len;
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_rw(s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen,
                         true);
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor.  */
        s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
                                         ZDMA_CH_DST_DSCR_WORD2,
                                         SIZE,
                                         dst_size);
    }
}

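/*
 * Execute one source descriptor: read up to sizeof(s->buf) bytes at a
 * time from the source (or replicate the WR_ONLY words in write-only
 * mode) and push them to the destination, then stop, pause or chain to
 * the next source descriptor according to the CMD field and pointer
 * type.
 */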
static void zdma_process_descr(XlnxZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to.  */
        src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    while (src_size) {
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_rw(s->dma_as, src_addr, s->attr, s->buf, len,
                             false);
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;
    }

    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);

    if (src_intr) {
        zdma_src_done(s);
    }

    /* Load next descriptor.  */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
        return;
    }

    if (src_cmd == CMD_HALT) {
        zdma_set_state(s, PAUSED);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        zdma_ch_imr_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}

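/*
 * Main processing loop: keep fetching and executing source descriptors
 * while the channel is ENABLED and no error has been flagged, then
 * re-evaluate the interrupt line.
 */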
static void zdma_run(XlnxZDMA *s)
{
    while (s->state == ENABLED && !s->error) {
        zdma_load_src_descriptor(s);

        if (s->error) {
            zdma_set_state(s, DISABLED);
        } else {
            zdma_process_descr(s);
        }
    }

    zdma_ch_imr_update_irq(s);
}

static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
{
    uint64_t src_addr, dst_addr;

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
    zdma_load_dst_descriptor(s);
}

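/*
 * Post-write hook shared by ZDMA_CH_CTRL0 and ZDMA_CH_CTRL2. Setting EN
 * starts the channel, either from DISABLED or by resuming a PAUSED
 * channel when CONT is set (restarting from the START registers if
 * CONT_ADDR is also set). Clearing EN with CONT set takes a paused
 * channel back to DISABLED. zdma_run() is called at the end and only
 * transfers data if the channel ended up ENABLED.
 */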
static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);

    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        s->error = false;

        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                zdma_update_descr_addr_from_start(s);
            } else {
                bool src_type = FIELD_EX32(s->dsc_src.words[3],
                                           ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                       R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
            ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
            zdma_set_state(s, ENABLED);
        } else if (s->state == DISABLED) {
            zdma_update_descr_addr_from_start(s);
            zdma_set_state(s, ENABLED);
        }
    } else {
        /* Leave Paused state?  */
        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}

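/*
 * Per-register access descriptions consumed by the register API:
 * reset values, reserved/read-only/write-one-to-clear/clear-on-read
 * masks, and the pre/post write hooks defined above.
 */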
static RegisterAccessInfo zdma_regs_info[] = {
    {   .name = "ZDMA_ERR_CTRL",  .addr = A_ZDMA_ERR_CTRL,
        .rsvd = 0xfffffffe,
    },{ .name = "ZDMA_CH_ISR",  .addr = A_ZDMA_CH_ISR,
        .rsvd = 0xfffff000,
        .w1c = 0xfff,
        .post_write = zdma_ch_isr_postw,
    },{ .name = "ZDMA_CH_IMR",  .addr = A_ZDMA_CH_IMR,
        .reset = 0xfff,
        .rsvd = 0xfffff000,
        .ro = 0xfff,
    },{ .name = "ZDMA_CH_IEN",  .addr = A_ZDMA_CH_IEN,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ien_prew,
    },{ .name = "ZDMA_CH_IDS",  .addr = A_ZDMA_CH_IDS,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ids_prew,
    },{ .name = "ZDMA_CH_CTRL0",  .addr = A_ZDMA_CH_CTRL0,
        .reset = 0x80,
        .rsvd = 0xffffff01,
        .post_write = zdma_ch_ctrlx_postw,
    },{ .name = "ZDMA_CH_CTRL1",  .addr = A_ZDMA_CH_CTRL1,
        .reset = 0x3ff,
        .rsvd = 0xfffffc00,
    },{ .name = "ZDMA_CH_FCI",  .addr = A_ZDMA_CH_FCI,
        .rsvd = 0xffffffc0,
    },{ .name = "ZDMA_CH_STATUS",  .addr = A_ZDMA_CH_STATUS,
        .rsvd = 0xfffffffc,
        .ro = 0x3,
    },{ .name = "ZDMA_CH_DATA_ATTR",  .addr = A_ZDMA_CH_DATA_ATTR,
        .reset = 0x483d20f,
        .rsvd = 0xf0000000,
    },{ .name = "ZDMA_CH_DSCR_ATTR",  .addr = A_ZDMA_CH_DSCR_ATTR,
        .rsvd = 0xfffffe00,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD0",  .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD1",  .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD2",  .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD3",  .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
        .rsvd = 0xffffffe0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD0",  .addr = A_ZDMA_CH_DST_DSCR_WORD0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD1",  .addr = A_ZDMA_CH_DST_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD2",  .addr = A_ZDMA_CH_DST_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD3",  .addr = A_ZDMA_CH_DST_DSCR_WORD3,
        .rsvd = 0xfffffffa,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD0",  .addr = A_ZDMA_CH_WR_ONLY_WORD0,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD1",  .addr = A_ZDMA_CH_WR_ONLY_WORD1,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD2",  .addr = A_ZDMA_CH_WR_ONLY_WORD2,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD3",  .addr = A_ZDMA_CH_WR_ONLY_WORD3,
    },{ .name = "ZDMA_CH_SRC_START_LSB",  .addr = A_ZDMA_CH_SRC_START_LSB,
    },{ .name = "ZDMA_CH_SRC_START_MSB",  .addr = A_ZDMA_CH_SRC_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_START_LSB",  .addr = A_ZDMA_CH_DST_START_LSB,
    },{ .name = "ZDMA_CH_DST_START_MSB",  .addr = A_ZDMA_CH_DST_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_TOTAL_BYTE",  .addr = A_ZDMA_CH_TOTAL_BYTE,
        .w1c = 0xffffffff,
    },{ .name = "ZDMA_CH_RATE_CNTL",  .addr = A_ZDMA_CH_RATE_CNTL,
        .rsvd = 0xfffff000,
    },{ .name = "ZDMA_CH_IRQ_SRC_ACCT",  .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_IRQ_DST_ACCT",  .addr = A_ZDMA_CH_IRQ_DST_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_DBG0",  .addr = A_ZDMA_CH_DBG0,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_DBG1",  .addr = A_ZDMA_CH_DBG1,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_CTRL2",  .addr = A_ZDMA_CH_CTRL2,
        .rsvd = 0xfffffffe,
        .post_write = zdma_ch_ctrlx_postw,
    }
};

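/* Reset every register to its described reset value and re-evaluate
 * the interrupt line.  */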
static void zdma_reset(DeviceState *dev)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        register_reset(&s->regs_info[i]);
    }

    zdma_ch_imr_update_irq(s);
}

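/*
 * MMIO accessors. Accesses that decode to an unimplemented register
 * (no RegisterInfo backing data) are logged, flag INV_APB in the
 * channel ISR and read back as zero.
 */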
static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path,
                 addr);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return 0;
    }
    return register_read(r, ~0, NULL, false);
}

static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
                       unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path,
                 addr, value);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return;
    }
    register_write(r, value, ~0, NULL, false);
}

static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

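/*
 * Realize: point each RegisterInfo at its backing word in s->regs and
 * its access description, then pick the DMA address space: the "dma"
 * link property if one was set, otherwise the global system memory
 * address space.
 */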
static void zdma_realize(DeviceState *dev, Error **errp)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
        RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];

        *r = (RegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    zdma_regs_info[i].addr / 4],
            .data_size = sizeof(uint32_t),
            .access = &zdma_regs_info[i],
            .opaque = s,
        };
    }

    if (s->dma_mr) {
        s->dma_as = g_malloc0(sizeof(AddressSpace));
        address_space_init(s->dma_as, s->dma_mr, NULL);
    } else {
        s->dma_as = &address_space_memory;
    }
    s->attr = MEMTXATTRS_UNSPECIFIED;
}

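/*
 * Instance init: expose the register block (ZDMA_R_MAX * 4 bytes) as an
 * MMIO region, the channel interrupt, and an optional "dma" link that
 * lets boards route DMA accesses through a specific memory region.
 */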
static void zdma_init(Object *obj)
{
    XlnxZDMA *s = XLNX_ZDMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG,
                             &error_abort);
}

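/* Migration description: the register file, the channel state machine
 * and the two buffered descriptors.  */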
static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XLNX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
        VMSTATE_UINT32(state, XlnxZDMA),
        VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
        VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
        VMSTATE_END_OF_LIST(),
    }
};

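/* "bus-width" is the data bus width in bits (64 by default); the model
 * uses it as the per-beat transfer size for FIXED bursts and for the
 * write-only fill data.  */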
static Property zdma_props[] = {
    DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static void zdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = zdma_reset;
    dc->realize = zdma_realize;
    dc->props = zdma_props;
    dc->vmsd = &vmstate_zdma;
}

static const TypeInfo zdma_info = {
    .name          = TYPE_XLNX_ZDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZDMA),
    .class_init    = zdma_class_init,
    .instance_init = zdma_init,
};

static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}

type_init(zdma_register_types)