/*
 * QEMU model of the ZynqMP generic DMA
 *
 * Copyright (c) 2014 Xilinx Inc.
 * Copyright (c) 2018 FEIMTECH AB
 *
 * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
 *            Francisco Iglesias <francisco.iglesias@feimtech.se>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/dma/xlnx-zdma.h"
#include "hw/irq.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"

#ifndef XLNX_ZDMA_ERR_DEBUG
#define XLNX_ZDMA_ERR_DEBUG 0
#endif

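/*
 * Channel register map. REG32() (from "hw/registerfields.h") defines an
 * A_<name> byte offset and an R_<name> index into s->regs[]; FIELD()
 * defines the shift/length constants used by the FIELD_EX32()/FIELD_DP32()
 * and ARRAY_FIELD_*() accessors throughout this file.
 */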
REG32(ZDMA_ERR_CTRL, 0x0)
    FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
REG32(ZDMA_CH_ISR, 0x100)
    FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
REG32(ZDMA_CH_IMR, 0x104)
    FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
REG32(ZDMA_CH_IEN, 0x108)
    FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
REG32(ZDMA_CH_IDS, 0x10c)
    FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
REG32(ZDMA_CH_CTRL0, 0x110)
    FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
    FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
    FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
    FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
    FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
    FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
REG32(ZDMA_CH_CTRL1, 0x114)
    FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
    FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
REG32(ZDMA_CH_FCI, 0x118)
    FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
    FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
    FIELD(ZDMA_CH_FCI, EN, 0, 1)
REG32(ZDMA_CH_STATUS, 0x11c)
    FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
REG32(ZDMA_CH_DATA_ATTR, 0x120)
    FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
    FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
    FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
REG32(ZDMA_CH_DSCR_ATTR, 0x124)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
    FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
    FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
    FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
    FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
    FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
REG32(ZDMA_CH_SRC_START_LSB, 0x158)
REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
    FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_START_LSB, 0x160)
REG32(ZDMA_CH_DST_START_MSB, 0x164)
    FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_RATE_CTRL, 0x18c)
    FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
    FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
    FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_DBG0, 0x198)
    FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
REG32(ZDMA_CH_DBG1, 0x19c)
    FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
REG32(ZDMA_CH_CTRL2, 0x200)
    FIELD(ZDMA_CH_CTRL2, EN, 0, 1)

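/*
 * Encodings used by the descriptor words and CH_CTRL0 fields, as modelled
 * here: POINT_TYPE selects simple register mode (PT_REG) or descriptor
 * fetches from memory (PT_MEM); CMD in SRC_DSCR_WORD3 can halt or stop the
 * channel after the current descriptor; MODE selects read/write, write-only
 * or read-only transfers; TYPE selects linear (contiguous) or linked-list
 * descriptor chaining; ARBURST/AWBURST select fixed or incrementing
 * addresses on the data port.
 */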
enum {
    PT_REG = 0,
    PT_MEM = 1,
};

enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR  = 1,
};

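/*
 * Interrupt handling: CH_ISR collects status bits and CH_IMR masks them
 * (a set bit disables the interrupt). The single output IRQ is raised
 * whenever (ISR & ~IMR) is non-zero. CH_IEN and CH_IDS are write-only
 * triggers that clear respectively set bits in CH_IMR; the written value
 * itself is discarded (the pre_write hooks return 0).
 */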
static void zdma_ch_imr_update_irq(XlnxZDMA *s)
{
    bool pending;

    pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];

    qemu_set_irq(s->irq_zdma_ch_imr, pending);
}

static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] &= ~val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] |= val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

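/*
 * The 2-bit CH_STATUS.STATE field mirrors the internal channel state
 * (DISABLED, ENABLED, PAUSED); value 3 is reported while an error
 * condition is latched in s->error.
 */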
static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
{
    s->state = state;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);

    /* Signal error if we have an error condition.  */
    if (s->error) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
    }
}

static void zdma_src_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);

    /* Did we overflow?  */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

static void zdma_dst_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);

    /* Did we overflow?  */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

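/*
 * 64-bit addresses (descriptor pointers, start and payload addresses) are
 * split across consecutive LSB/MSB register pairs; these helpers assemble
 * and update such a pair given the index of the LSB register.
 */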
static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
{
    uint64_t addr;

    addr = s->regs[basereg + 1];
    addr <<= 32;
    addr |= s->regs[basereg];

    return addr;
}

static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
{
    s->regs[basereg] = addr;
    s->regs[basereg + 1] = addr >> 32;
}

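/*
 * Descriptors as modelled here are 128 bits: a 64-bit buffer address, a
 * 32-bit size and a 32-bit control/attribute word (see XlnxZDMADescr in the
 * header). They are fetched over the DMA address space and must be aligned
 * to their own size; unaligned addresses latch an error and yield an
 * all-zero descriptor.
 */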
static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
{
    /* ZDMA descriptors must be aligned to their own size.  */
    if (addr % sizeof(XlnxZDMADescr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "zdma: unaligned descriptor at %" PRIx64 "\n",
                      addr);
        memset(buf, 0x0, sizeof(XlnxZDMADescr));
        s->error = true;
        return false;
    }

    address_space_rw(s->dma_as, addr, s->attr,
                     buf, sizeof(XlnxZDMADescr), false);
    return true;
}

static void zdma_load_src_descriptor(XlnxZDMA *s)
{
    uint64_t src_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_src, &s->regs[R_ZDMA_CH_SRC_DSCR_WORD0],
               sizeof(s->dsc_src));
        return;
    }

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
    }
}

static void zdma_load_dst_descriptor(XlnxZDMA *s)
{
    uint64_t dst_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_dst, &s->regs[R_ZDMA_CH_DST_DSCR_WORD0],
               sizeof(s->dsc_dst));
        return;
    }

    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
    }
}

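/*
 * Advance a descriptor pointer register pair. For linear descriptors the
 * next descriptor follows contiguously in memory; for linked-list
 * descriptors a 64-bit pointer to the next descriptor is stored immediately
 * after the 128-bit descriptor and is fetched here.
 */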
static uint64_t zdma_update_descr_addr(XlnxZDMA *s, bool type,
                                       unsigned int basereg)
{
    uint64_t addr, next;

    if (type == DTYPE_LINEAR) {
        next = zdma_get_regaddr64(s, basereg);
        next += sizeof(s->dsc_dst);
        zdma_put_regaddr64(s, basereg, next);
    } else {
        addr = zdma_get_regaddr64(s, basereg);
        addr += sizeof(s->dsc_dst);
        address_space_rw(s->dma_as, addr, s->attr, (void *) &next, 8, false);
        zdma_put_regaddr64(s, basereg, next);
    }
    return next;
}

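/*
 * Push 'len' bytes from the internal buffer out to the destination,
 * consuming destination descriptors as their remaining size reaches zero.
 * In simple register mode the destination size is ignored and the source
 * size drives the transfer, matching hardware behaviour.
 */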
static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr, dst_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               AWBURST);

    /* FIXED burst types are only supported in simple dma mode.  */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              TYPE);
        if (dst_size == 0 && ptype == PT_MEM) {
            uint64_t next;
            next = zdma_update_descr_addr(s, dst_type,
                                          R_ZDMA_CH_DST_CUR_DSCR_LSB);
            zdma_load_descriptor(s, next, &s->dsc_dst);
            dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                                  SIZE);
            dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                                  TYPE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode.  */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              INTR);

        dlen = len > dst_size ? dst_size : len;
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_rw(s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen,
                         true);
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor.  */
        s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
                                         ZDMA_CH_DST_DSCR_WORD2,
                                         SIZE,
                                         dst_size);
    }
}

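/*
 * Process one source descriptor: copy (or generate, in write-only mode)
 * the payload in s->buf sized chunks, then either stop, pause or chain to
 * the next descriptor depending on the descriptor CMD field and the
 * pointer type.
 */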
static void zdma_process_descr(XlnxZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to.  */
        src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    while (src_size) {
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_rw(s->dma_as, src_addr, s->attr, s->buf, len,
                             false);
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;
    }

    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);

    if (src_intr) {
        zdma_src_done(s);
    }

    /* Load next descriptor.  */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
        return;
    }

    if (src_cmd == CMD_HALT) {
        zdma_set_state(s, PAUSED);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        zdma_ch_imr_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}

static void zdma_run(XlnxZDMA *s)
{
    while (s->state == ENABLED && !s->error) {
        zdma_load_src_descriptor(s);

        if (s->error) {
            zdma_set_state(s, DISABLED);
        } else {
            zdma_process_descr(s);
        }
    }

    zdma_ch_imr_update_irq(s);
}

static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
{
    uint64_t src_addr, dst_addr;

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
    zdma_load_dst_descriptor(s);
}

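/*
 * Post-write hook shared by CH_CTRL0 and CH_CTRL2. Setting CH_CTRL2.EN
 * (re)starts the channel, either from the START address registers or, when
 * resuming from PAUSED with CH_CTRL0.CONT set, from the current or the
 * start descriptor depending on CONT_ADDR. Clearing EN while paused with
 * CONT set disables the channel.
 *
 * As a rough guest-side sketch (simple register mode, not an exhaustive
 * programming sequence): write the source address and size to
 * ZDMA_CH_SRC_DSCR_WORD0/1/2, the destination address to
 * ZDMA_CH_DST_DSCR_WORD0/1, clear CH_CTRL0.POINT_TYPE, then set
 * CH_CTRL2.EN and wait for CH_ISR.DMA_DONE.
 */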
static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);

    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        s->error = false;

        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                zdma_update_descr_addr_from_start(s);
            } else {
                bool src_type = FIELD_EX32(s->dsc_src.words[3],
                                       ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                          R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
            ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
            zdma_set_state(s, ENABLED);
        } else if (s->state == DISABLED) {
            zdma_update_descr_addr_from_start(s);
            zdma_set_state(s, ENABLED);
        }
    } else {
        /* Leave Paused state?  */
        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}

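/*
 * Per-register access description consumed by the register API
 * ("hw/register.h"): .reset is the power-on value, .rsvd marks reserved
 * bits, .ro read-only bits, .w1c write-one-to-clear bits and .cor
 * clear-on-read bits; the pre_write/post_write hooks implement the
 * interrupt enable/disable triggers and the channel start/stop side
 * effects above.
 */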
static RegisterAccessInfo zdma_regs_info[] = {
    {   .name = "ZDMA_ERR_CTRL",  .addr = A_ZDMA_ERR_CTRL,
        .rsvd = 0xfffffffe,
    },{ .name = "ZDMA_CH_ISR",  .addr = A_ZDMA_CH_ISR,
        .rsvd = 0xfffff000,
        .w1c = 0xfff,
        .post_write = zdma_ch_isr_postw,
    },{ .name = "ZDMA_CH_IMR",  .addr = A_ZDMA_CH_IMR,
        .reset = 0xfff,
        .rsvd = 0xfffff000,
        .ro = 0xfff,
    },{ .name = "ZDMA_CH_IEN",  .addr = A_ZDMA_CH_IEN,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ien_prew,
    },{ .name = "ZDMA_CH_IDS",  .addr = A_ZDMA_CH_IDS,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ids_prew,
    },{ .name = "ZDMA_CH_CTRL0",  .addr = A_ZDMA_CH_CTRL0,
        .reset = 0x80,
        .rsvd = 0xffffff01,
        .post_write = zdma_ch_ctrlx_postw,
    },{ .name = "ZDMA_CH_CTRL1",  .addr = A_ZDMA_CH_CTRL1,
        .reset = 0x3ff,
        .rsvd = 0xfffffc00,
    },{ .name = "ZDMA_CH_FCI",  .addr = A_ZDMA_CH_FCI,
        .rsvd = 0xffffffc0,
    },{ .name = "ZDMA_CH_STATUS",  .addr = A_ZDMA_CH_STATUS,
        .rsvd = 0xfffffffc,
        .ro = 0x3,
    },{ .name = "ZDMA_CH_DATA_ATTR",  .addr = A_ZDMA_CH_DATA_ATTR,
        .reset = 0x483d20f,
        .rsvd = 0xf0000000,
    },{ .name = "ZDMA_CH_DSCR_ATTR",  .addr = A_ZDMA_CH_DSCR_ATTR,
        .rsvd = 0xfffffe00,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD0",  .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD1",  .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD2",  .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD3",  .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
        .rsvd = 0xffffffe0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD0",  .addr = A_ZDMA_CH_DST_DSCR_WORD0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD1",  .addr = A_ZDMA_CH_DST_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD2",  .addr = A_ZDMA_CH_DST_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD3",  .addr = A_ZDMA_CH_DST_DSCR_WORD3,
        .rsvd = 0xfffffffa,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD0",  .addr = A_ZDMA_CH_WR_ONLY_WORD0,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD1",  .addr = A_ZDMA_CH_WR_ONLY_WORD1,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD2",  .addr = A_ZDMA_CH_WR_ONLY_WORD2,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD3",  .addr = A_ZDMA_CH_WR_ONLY_WORD3,
    },{ .name = "ZDMA_CH_SRC_START_LSB",  .addr = A_ZDMA_CH_SRC_START_LSB,
    },{ .name = "ZDMA_CH_SRC_START_MSB",  .addr = A_ZDMA_CH_SRC_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_START_LSB",  .addr = A_ZDMA_CH_DST_START_LSB,
    },{ .name = "ZDMA_CH_DST_START_MSB",  .addr = A_ZDMA_CH_DST_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_TOTAL_BYTE",  .addr = A_ZDMA_CH_TOTAL_BYTE,
        .w1c = 0xffffffff,
    },{ .name = "ZDMA_CH_RATE_CNTL",  .addr = A_ZDMA_CH_RATE_CNTL,
        .rsvd = 0xfffff000,
    },{ .name = "ZDMA_CH_IRQ_SRC_ACCT",  .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_IRQ_DST_ACCT",  .addr = A_ZDMA_CH_IRQ_DST_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_DBG0",  .addr = A_ZDMA_CH_DBG0,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_DBG1",  .addr = A_ZDMA_CH_DBG1,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_CTRL2",  .addr = A_ZDMA_CH_CTRL2,
        .rsvd = 0xfffffffe,
        .post_write = zdma_ch_ctrlx_postw,
    }
};

static void zdma_reset(DeviceState *dev)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        register_reset(&s->regs_info[i]);
    }

    zdma_ch_imr_update_irq(s);
}

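/*
 * MMIO accessors. Accesses are dispatched through the RegisterInfo table
 * built at realize time; accesses to unimplemented offsets are logged and
 * latch CH_ISR.INV_APB, the invalid APB access status bit.
 */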
static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path,
                 addr);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return 0;
    }
    return register_read(r, ~0, NULL, false);
}

static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
                      unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path,
                 addr, value);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return;
    }
    register_write(r, value, ~0, NULL, false);
}

static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

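/*
 * At realize time each implemented register is bound to its backing word
 * in s->regs[]. DMA accesses go through the "dma" link property when it is
 * set (wrapped in a private AddressSpace), otherwise through the global
 * system memory address space.
 */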
static void zdma_realize(DeviceState *dev, Error **errp)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
        RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];

        *r = (RegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    zdma_regs_info[i].addr / 4],
            .data_size = sizeof(uint32_t),
            .access = &zdma_regs_info[i],
            .opaque = s,
        };
    }

    if (s->dma_mr) {
        s->dma_as = g_malloc0(sizeof(AddressSpace));
        address_space_init(s->dma_as, s->dma_mr, NULL);
    } else {
        s->dma_as = &address_space_memory;
    }
    s->attr = MEMTXATTRS_UNSPECIFIED;
}

static void zdma_init(Object *obj)
{
    XlnxZDMA *s = XLNX_ZDMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG,
                             &error_abort);
}

static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XLNX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
        VMSTATE_UINT32(state, XlnxZDMA),
        VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
        VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
        VMSTATE_END_OF_LIST(),
    }
};

static Property zdma_props[] = {
    DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static void zdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = zdma_reset;
    dc->realize = zdma_realize;
    dc->props = zdma_props;
    dc->vmsd = &vmstate_zdma;
}

static const TypeInfo zdma_info = {
    .name          = TYPE_XLNX_ZDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZDMA),
    .class_init    = zdma_class_init,
    .instance_init = zdma_init,
};

static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}

type_init(zdma_register_types)