xref: /openbmc/qemu/hw/dma/xlnx-zdma.c (revision db725815985654007ade0fd53590d613fd657208)
1 /*
2  * QEMU model of the ZynqMP generic DMA
3  *
4  * Copyright (c) 2014 Xilinx Inc.
5  * Copyright (c) 2018 FEIMTECH AB
6  *
7  * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
8  *            Francisco Iglesias <francisco.iglesias@feimtech.se>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 
29 #include "qemu/osdep.h"
30 #include "hw/dma/xlnx-zdma.h"
31 #include "hw/irq.h"
32 #include "migration/vmstate.h"
33 #include "qemu/bitops.h"
34 #include "qemu/log.h"
35 #include "qemu/module.h"
36 #include "qapi/error.h"
37 
38 #ifndef XLNX_ZDMA_ERR_DEBUG
39 #define XLNX_ZDMA_ERR_DEBUG 0
40 #endif
41 
/*
 * Register-file layout, declared with the REG32/FIELD helpers from
 * hw/registerfields.h.  Offsets are relative to the channel base; the
 * generated R_*/A_* constants and field masks are used throughout below.
 */
REG32(ZDMA_ERR_CTRL, 0x0)
    FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
REG32(ZDMA_CH_ISR, 0x100)
    FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
REG32(ZDMA_CH_IMR, 0x104)
    FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
REG32(ZDMA_CH_IEN, 0x108)
    FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
REG32(ZDMA_CH_IDS, 0x10c)
    FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
REG32(ZDMA_CH_CTRL0, 0x110)
    FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
    FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
    FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
    FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
    FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
    FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
REG32(ZDMA_CH_CTRL1, 0x114)
    FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
    FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
REG32(ZDMA_CH_FCI, 0x118)
    FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
    FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
    FIELD(ZDMA_CH_FCI, EN, 0, 1)
REG32(ZDMA_CH_STATUS, 0x11c)
    FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
REG32(ZDMA_CH_DATA_ATTR, 0x120)
    FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
    FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
    FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
REG32(ZDMA_CH_DSCR_ATTR, 0x124)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
    FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
    FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
    FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
    FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
    FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
REG32(ZDMA_CH_SRC_START_LSB, 0x158)
REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
    FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_START_LSB, 0x160)
REG32(ZDMA_CH_DST_START_MSB, 0x164)
    FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_RATE_CTRL, 0x18c)
    FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
/* NOTE(review): offset 0x18c is declared twice — ZDMA_CH_RATE_CTRL above and
 * ZDMA_CH_RATE_CNTL here.  Only the RATE_CNTL names are used by the access
 * table below; consider dropping the unused duplicate.  */
REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
    FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
    FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_DBG0, 0x198)
    FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
REG32(ZDMA_CH_DBG1, 0x19c)
    FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
REG32(ZDMA_CH_CTRL2, 0x200)
    FIELD(ZDMA_CH_CTRL2, EN, 0, 1)
181 
/* ZDMA_CH_CTRL0.POINT_TYPE: simple (register-held descriptor) vs
 * scatter-gather (descriptors fetched from memory).  */
enum {
    PT_REG = 0,
    PT_MEM = 1,
};

/* SRC descriptor WORD3.CMD values acted on after a descriptor completes. */
enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

/* ZDMA_CH_CTRL0.MODE: read-write, write-only or read-only transfer. */
enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

/* Descriptor WORD3.TYPE: next descriptor is contiguous (linear) or
 * reached through a next-pointer (linked).  */
enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

/* AXI AxBURST encodings used in ZDMA_CH_DATA_ATTR. */
enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR  = 1,
};
207 
208 static void zdma_ch_imr_update_irq(XlnxZDMA *s)
209 {
210     bool pending;
211 
212     pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];
213 
214     qemu_set_irq(s->irq_zdma_ch_imr, pending);
215 }
216 
217 static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
218 {
219     XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
220     zdma_ch_imr_update_irq(s);
221 }
222 
223 static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
224 {
225     XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
226     uint32_t val = val64;
227 
228     s->regs[R_ZDMA_CH_IMR] &= ~val;
229     zdma_ch_imr_update_irq(s);
230     return 0;
231 }
232 
233 static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
234 {
235     XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
236     uint32_t val = val64;
237 
238     s->regs[R_ZDMA_CH_IMR] |= val;
239     zdma_ch_imr_update_irq(s);
240     return 0;
241 }
242 
243 static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
244 {
245     s->state = state;
246     ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);
247 
248     /* Signal error if we have an error condition.  */
249     if (s->error) {
250         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
251     }
252 }
253 
254 static void zdma_src_done(XlnxZDMA *s)
255 {
256     unsigned int cnt;
257     cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
258     cnt++;
259     ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
260     ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);
261 
262     /* Did we overflow?  */
263     if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
264         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
265     }
266     zdma_ch_imr_update_irq(s);
267 }
268 
269 static void zdma_dst_done(XlnxZDMA *s)
270 {
271     unsigned int cnt;
272     cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
273     cnt++;
274     ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
275     ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);
276 
277     /* Did we overflow?  */
278     if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
279         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
280     }
281     zdma_ch_imr_update_irq(s);
282 }
283 
284 static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
285 {
286     uint64_t addr;
287 
288     addr = s->regs[basereg + 1];
289     addr <<= 32;
290     addr |= s->regs[basereg];
291 
292     return addr;
293 }
294 
295 static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
296 {
297     s->regs[basereg] = addr;
298     s->regs[basereg + 1] = addr >> 32;
299 }
300 
301 static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
302 {
303     /* ZDMA descriptors must be aligned to their own size.  */
304     if (addr % sizeof(XlnxZDMADescr)) {
305         qemu_log_mask(LOG_GUEST_ERROR,
306                       "zdma: unaligned descriptor at %" PRIx64,
307                       addr);
308         memset(buf, 0x0, sizeof(XlnxZDMADescr));
309         s->error = true;
310         return false;
311     }
312 
313     address_space_rw(s->dma_as, addr, s->attr,
314                      buf, sizeof(XlnxZDMADescr), false);
315     return true;
316 }
317 
318 static void zdma_load_src_descriptor(XlnxZDMA *s)
319 {
320     uint64_t src_addr;
321     unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
322 
323     if (ptype == PT_REG) {
324         memcpy(&s->dsc_src, &s->regs[R_ZDMA_CH_SRC_DSCR_WORD0],
325                sizeof(s->dsc_src));
326         return;
327     }
328 
329     src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
330 
331     if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
332         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
333     }
334 }
335 
336 static void zdma_load_dst_descriptor(XlnxZDMA *s)
337 {
338     uint64_t dst_addr;
339     unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
340 
341     if (ptype == PT_REG) {
342         memcpy(&s->dsc_dst, &s->regs[R_ZDMA_CH_DST_DSCR_WORD0],
343                sizeof(s->dsc_dst));
344         return;
345     }
346 
347     dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);
348 
349     if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
350         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
351     }
352 }
353 
/*
 * Advance the current-descriptor pointer stored in the LSB/MSB register
 * pair at @basereg and return the new descriptor address.
 *
 * DTYPE_LINEAR: the next descriptor is contiguous — simply step past the
 * descriptor body.  Otherwise (linked): read the 64-bit next-pointer that
 * sits immediately after the descriptor body in guest memory.
 *
 * NOTE(review): sizeof(s->dsc_dst) is used for both the SRC and DST
 * channels; correct only because both descriptor structs are the same
 * size — verify if the layouts ever diverge.
 * NOTE(review): the next-pointer is read raw into a host uint64_t —
 * presumably assumes a little-endian guest layout; confirm on BE hosts.
 */
static uint64_t zdma_update_descr_addr(XlnxZDMA *s, bool type,
                                       unsigned int basereg)
{
    uint64_t addr, next;

    if (type == DTYPE_LINEAR) {
        next = zdma_get_regaddr64(s, basereg);
        next += sizeof(s->dsc_dst);
        zdma_put_regaddr64(s, basereg, next);
    } else {
        addr = zdma_get_regaddr64(s, basereg);
        addr += sizeof(s->dsc_dst);
        address_space_rw(s->dma_as, addr, s->attr, (void *) &next, 8, false);
        zdma_put_regaddr64(s, basereg, next);
    }
    return next;
}
371 
/*
 * Push @len bytes from @buf to the destination described by the buffered
 * DST descriptor, chaining to follow-on DST descriptors (PT_MEM) as each
 * one fills.  The remaining size is written back into the buffered
 * descriptor so successive calls continue where the last one stopped.
 */
static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr, dst_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               AWBURST);

    /* FIXED burst types are only supported in simple dma mode.  */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              TYPE);
        /* Current DST descriptor exhausted: fetch the next one.  */
        if (dst_size == 0 && ptype == PT_MEM) {
            uint64_t next;
            next = zdma_update_descr_addr(s, dst_type,
                                          R_ZDMA_CH_DST_CUR_DSCR_LSB);
            zdma_load_descriptor(s, next, &s->dsc_dst);
            dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                                  SIZE);
            dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                                  TYPE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode.  */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        /* NOTE(review): in simple write-only mode (PT_REG, RW_MODE_WO) a
         * descriptor with dst_size == 0 leaves dlen at 0 and this loop
         * never terminates — verify a guest cannot program that.  */
        dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              INTR);

        dlen = len > dst_size ? dst_size : len;
        /* FIXED bursts move at most one bus width per access and do not
         * advance the destination address.  */
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_rw(s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen,
                         true);
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        /* Raise DST accounting when a descriptor with INTR set completes. */
        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor.  */
        s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
                                         ZDMA_CH_DST_DSCR_WORD2,
                                         SIZE,
                                         dst_size);
    }
}
438 
/*
 * Execute the currently buffered SRC descriptor: stream its payload through
 * the bounce buffer s->buf into the destination channel, update byte
 * accounting and IRQ state, then decide whether to disable, pause, or
 * advance to the next SRC descriptor.
 */
static void zdma_process_descr(XlnxZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        /* Force the supported combination and carry on.  */
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to.  */
        src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        /* Seed the bounce buffer with one bus-width of WR_ONLY data.  */
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    /* Move the payload in chunks of at most sizeof(s->buf) bytes.  */
    while (src_size) {
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        /* FIXED bursts read at most one bus width per access.  */
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            /* Write-only: replay the seeded pattern; no source read.  */
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_rw(s->dma_as, src_addr, s->attr, s->buf, len,
                             false);
            /* FIXED bursts do not advance the source address.  */
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;
    }

    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);

    if (src_intr) {
        zdma_src_done(s);
    }

    /* Load next descriptor.  */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        /* Simple mode runs one descriptor; STOP ends the chain.  */
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
        return;
    }

    if (src_cmd == CMD_HALT) {
        /* HALT pauses the channel; CTRL0.CONT resumes it later.  */
        zdma_set_state(s, PAUSED);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        zdma_ch_imr_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}
532 
533 static void zdma_run(XlnxZDMA *s)
534 {
535     while (s->state == ENABLED && !s->error) {
536         zdma_load_src_descriptor(s);
537 
538         if (s->error) {
539             zdma_set_state(s, DISABLED);
540         } else {
541             zdma_process_descr(s);
542         }
543     }
544 
545     zdma_ch_imr_update_irq(s);
546 }
547 
548 static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
549 {
550     uint64_t src_addr, dst_addr;
551 
552     src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
553     zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
554     dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
555     zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
556     zdma_load_dst_descriptor(s);
557 }
558 
/*
 * Post-write handler shared by ZDMA_CH_CTRL0 and ZDMA_CH_CTRL2.
 *
 * When CTRL2.EN is set: clear any latched error, then either resume a
 * PAUSED channel (CTRL0.CONT set — restarting from the START registers if
 * CTRL0.CONT_ADDR == 1, or from the next descriptor otherwise) or start a
 * DISABLED channel from the START registers.  When EN is clear, a PAUSED
 * channel with CONT set drops to DISABLED.  Finally kick the transfer loop.
 */
static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);

    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        s->error = false;

        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                /* Resume from the programmed START addresses.  */
                zdma_update_descr_addr_from_start(s);
            } else {
                /* Resume by stepping past the halted SRC descriptor.  */
                bool src_type = FIELD_EX32(s->dsc_src.words[3],
                                       ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                          R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
            /* CONT is a one-shot trigger.  */
            ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
            zdma_set_state(s, ENABLED);
        } else if (s->state == DISABLED) {
            zdma_update_descr_addr_from_start(s);
            zdma_set_state(s, ENABLED);
        }
    } else {
        /* Leave Paused state?  */
        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}
592 
593 static RegisterAccessInfo zdma_regs_info[] = {
594     {   .name = "ZDMA_ERR_CTRL",  .addr = A_ZDMA_ERR_CTRL,
595         .rsvd = 0xfffffffe,
596     },{ .name = "ZDMA_CH_ISR",  .addr = A_ZDMA_CH_ISR,
597         .rsvd = 0xfffff000,
598         .w1c = 0xfff,
599         .post_write = zdma_ch_isr_postw,
600     },{ .name = "ZDMA_CH_IMR",  .addr = A_ZDMA_CH_IMR,
601         .reset = 0xfff,
602         .rsvd = 0xfffff000,
603         .ro = 0xfff,
604     },{ .name = "ZDMA_CH_IEN",  .addr = A_ZDMA_CH_IEN,
605         .rsvd = 0xfffff000,
606         .pre_write = zdma_ch_ien_prew,
607     },{ .name = "ZDMA_CH_IDS",  .addr = A_ZDMA_CH_IDS,
608         .rsvd = 0xfffff000,
609         .pre_write = zdma_ch_ids_prew,
610     },{ .name = "ZDMA_CH_CTRL0",  .addr = A_ZDMA_CH_CTRL0,
611         .reset = 0x80,
612         .rsvd = 0xffffff01,
613         .post_write = zdma_ch_ctrlx_postw,
614     },{ .name = "ZDMA_CH_CTRL1",  .addr = A_ZDMA_CH_CTRL1,
615         .reset = 0x3ff,
616         .rsvd = 0xfffffc00,
617     },{ .name = "ZDMA_CH_FCI",  .addr = A_ZDMA_CH_FCI,
618         .rsvd = 0xffffffc0,
619     },{ .name = "ZDMA_CH_STATUS",  .addr = A_ZDMA_CH_STATUS,
620         .rsvd = 0xfffffffc,
621         .ro = 0x3,
622     },{ .name = "ZDMA_CH_DATA_ATTR",  .addr = A_ZDMA_CH_DATA_ATTR,
623         .reset = 0x483d20f,
624         .rsvd = 0xf0000000,
625     },{ .name = "ZDMA_CH_DSCR_ATTR",  .addr = A_ZDMA_CH_DSCR_ATTR,
626         .rsvd = 0xfffffe00,
627     },{ .name = "ZDMA_CH_SRC_DSCR_WORD0",  .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
628     },{ .name = "ZDMA_CH_SRC_DSCR_WORD1",  .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
629         .rsvd = 0xfffe0000,
630     },{ .name = "ZDMA_CH_SRC_DSCR_WORD2",  .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
631         .rsvd = 0xc0000000,
632     },{ .name = "ZDMA_CH_SRC_DSCR_WORD3",  .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
633         .rsvd = 0xffffffe0,
634     },{ .name = "ZDMA_CH_DST_DSCR_WORD0",  .addr = A_ZDMA_CH_DST_DSCR_WORD0,
635     },{ .name = "ZDMA_CH_DST_DSCR_WORD1",  .addr = A_ZDMA_CH_DST_DSCR_WORD1,
636         .rsvd = 0xfffe0000,
637     },{ .name = "ZDMA_CH_DST_DSCR_WORD2",  .addr = A_ZDMA_CH_DST_DSCR_WORD2,
638         .rsvd = 0xc0000000,
639     },{ .name = "ZDMA_CH_DST_DSCR_WORD3",  .addr = A_ZDMA_CH_DST_DSCR_WORD3,
640         .rsvd = 0xfffffffa,
641     },{ .name = "ZDMA_CH_WR_ONLY_WORD0",  .addr = A_ZDMA_CH_WR_ONLY_WORD0,
642     },{ .name = "ZDMA_CH_WR_ONLY_WORD1",  .addr = A_ZDMA_CH_WR_ONLY_WORD1,
643     },{ .name = "ZDMA_CH_WR_ONLY_WORD2",  .addr = A_ZDMA_CH_WR_ONLY_WORD2,
644     },{ .name = "ZDMA_CH_WR_ONLY_WORD3",  .addr = A_ZDMA_CH_WR_ONLY_WORD3,
645     },{ .name = "ZDMA_CH_SRC_START_LSB",  .addr = A_ZDMA_CH_SRC_START_LSB,
646     },{ .name = "ZDMA_CH_SRC_START_MSB",  .addr = A_ZDMA_CH_SRC_START_MSB,
647         .rsvd = 0xfffe0000,
648     },{ .name = "ZDMA_CH_DST_START_LSB",  .addr = A_ZDMA_CH_DST_START_LSB,
649     },{ .name = "ZDMA_CH_DST_START_MSB",  .addr = A_ZDMA_CH_DST_START_MSB,
650         .rsvd = 0xfffe0000,
651     },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
652         .ro = 0xffffffff,
653     },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
654         .rsvd = 0xfffe0000,
655         .ro = 0x1ffff,
656     },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
657         .ro = 0xffffffff,
658     },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
659         .rsvd = 0xfffe0000,
660         .ro = 0x1ffff,
661     },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
662         .ro = 0xffffffff,
663     },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
664         .rsvd = 0xfffe0000,
665         .ro = 0x1ffff,
666     },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
667         .ro = 0xffffffff,
668     },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
669         .rsvd = 0xfffe0000,
670         .ro = 0x1ffff,
671     },{ .name = "ZDMA_CH_TOTAL_BYTE",  .addr = A_ZDMA_CH_TOTAL_BYTE,
672         .w1c = 0xffffffff,
673     },{ .name = "ZDMA_CH_RATE_CNTL",  .addr = A_ZDMA_CH_RATE_CNTL,
674         .rsvd = 0xfffff000,
675     },{ .name = "ZDMA_CH_IRQ_SRC_ACCT",  .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
676         .rsvd = 0xffffff00,
677         .ro = 0xff,
678         .cor = 0xff,
679     },{ .name = "ZDMA_CH_IRQ_DST_ACCT",  .addr = A_ZDMA_CH_IRQ_DST_ACCT,
680         .rsvd = 0xffffff00,
681         .ro = 0xff,
682         .cor = 0xff,
683     },{ .name = "ZDMA_CH_DBG0",  .addr = A_ZDMA_CH_DBG0,
684         .rsvd = 0xfffffe00,
685         .ro = 0x1ff,
686     },{ .name = "ZDMA_CH_DBG1",  .addr = A_ZDMA_CH_DBG1,
687         .rsvd = 0xfffffe00,
688         .ro = 0x1ff,
689     },{ .name = "ZDMA_CH_CTRL2",  .addr = A_ZDMA_CH_CTRL2,
690         .rsvd = 0xfffffffe,
691         .post_write = zdma_ch_ctrlx_postw,
692     }
693 };
694 
695 static void zdma_reset(DeviceState *dev)
696 {
697     XlnxZDMA *s = XLNX_ZDMA(dev);
698     unsigned int i;
699 
700     for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
701         register_reset(&s->regs_info[i]);
702     }
703 
704     zdma_ch_imr_update_irq(s);
705 }
706 
707 static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
708 {
709     XlnxZDMA *s = XLNX_ZDMA(opaque);
710     RegisterInfo *r = &s->regs_info[addr / 4];
711 
712     if (!r->data) {
713         gchar *path = object_get_canonical_path(OBJECT(s));
714         qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
715                  path,
716                  addr);
717         g_free(path);
718         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
719         zdma_ch_imr_update_irq(s);
720         return 0;
721     }
722     return register_read(r, ~0, NULL, false);
723 }
724 
725 static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
726                       unsigned size)
727 {
728     XlnxZDMA *s = XLNX_ZDMA(opaque);
729     RegisterInfo *r = &s->regs_info[addr / 4];
730 
731     if (!r->data) {
732         gchar *path = object_get_canonical_path(OBJECT(s));
733         qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
734                  path,
735                  addr, value);
736         g_free(path);
737         ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
738         zdma_ch_imr_update_irq(s);
739         return;
740     }
741     register_write(r, value, ~0, NULL, false);
742 }
743 
/* MMIO ops for the register file: 32-bit little-endian accesses only. */
static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
753 
754 static void zdma_realize(DeviceState *dev, Error **errp)
755 {
756     XlnxZDMA *s = XLNX_ZDMA(dev);
757     unsigned int i;
758 
759     for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
760         RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];
761 
762         *r = (RegisterInfo) {
763             .data = (uint8_t *)&s->regs[
764                     zdma_regs_info[i].addr / 4],
765             .data_size = sizeof(uint32_t),
766             .access = &zdma_regs_info[i],
767             .opaque = s,
768         };
769     }
770 
771     if (s->dma_mr) {
772         s->dma_as = g_malloc0(sizeof(AddressSpace));
773         address_space_init(s->dma_as, s->dma_mr, NULL);
774     } else {
775         s->dma_as = &address_space_memory;
776     }
777     s->attr = MEMTXATTRS_UNSPECIFIED;
778 }
779 
/* Instance init: create the MMIO region, the IRQ line, and the optional
 * "dma" memory-region link consumed at realize time.  */
static void zdma_init(Object *obj)
{
    XlnxZDMA *s = XLNX_ZDMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG,
                             &error_abort);
}
796 
/* Migration state: the raw register file, the channel state machine and
 * both buffered descriptors.  */
static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XLNX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    /* NOTE(review): minimum_version_id_old is a legacy field slated for
     * removal upstream — confirm it is still needed here.  */
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
        VMSTATE_UINT32(state, XlnxZDMA),
        VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
        VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
        VMSTATE_END_OF_LIST(),
    }
};
810 
/* User-configurable properties; bus-width (bits) defaults to 64 and
 * determines the per-beat transfer size used by the DMA loops.  */
static Property zdma_props[] = {
    DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
    DEFINE_PROP_END_OF_LIST(),
};
815 
/* Class init: wire reset/realize handlers, properties and migration state. */
static void zdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = zdma_reset;
    dc->realize = zdma_realize;
    dc->props = zdma_props;
    dc->vmsd = &vmstate_zdma;
}
825 
/* QOM type registration data: a sysbus device. */
static const TypeInfo zdma_info = {
    .name          = TYPE_XLNX_ZDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZDMA),
    .class_init    = zdma_class_init,
    .instance_init = zdma_init,
};
833 
/* Register the type with QOM at module-load time. */
static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}

type_init(zdma_register_types)
840