1 /*
2 * QEMU model of the ZynqMP generic DMA
3 *
4 * Copyright (c) 2014 Xilinx Inc.
5 * Copyright (c) 2018 FEIMTECH AB
6 *
7 * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
8 * Francisco Iglesias <francisco.iglesias@feimtech.se>
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
28
29 #include "qemu/osdep.h"
30 #include "hw/dma/xlnx-zdma.h"
31 #include "hw/irq.h"
32 #include "hw/qdev-properties.h"
33 #include "migration/vmstate.h"
34 #include "qemu/bitops.h"
35 #include "qemu/log.h"
36 #include "qemu/module.h"
37 #include "qapi/error.h"
38
39 #ifndef XLNX_ZDMA_ERR_DEBUG
40 #define XLNX_ZDMA_ERR_DEBUG 0
41 #endif
42
43 REG32(ZDMA_ERR_CTRL, 0x0)
44 FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
45 REG32(ZDMA_CH_ISR, 0x100)
46 FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
47 FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
48 FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
49 FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
50 FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
51 FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
52 FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
53 FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
54 FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
55 FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
56 FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
57 FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
58 REG32(ZDMA_CH_IMR, 0x104)
59 FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
60 FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
61 FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
62 FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
63 FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
64 FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
65 FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
66 FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
67 FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
68 FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
69 FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
70 FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
71 REG32(ZDMA_CH_IEN, 0x108)
72 FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
73 FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
74 FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
75 FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
76 FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
77 FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
78 FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
79 FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
80 FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
81 FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
82 FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
83 FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
84 REG32(ZDMA_CH_IDS, 0x10c)
85 FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
86 FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
87 FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
88 FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
89 FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
90 FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
91 FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
92 FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
93 FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
94 FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
95 FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
96 FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
97 REG32(ZDMA_CH_CTRL0, 0x110)
98 FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
99 FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
100 FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
101 FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
102 FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
103 FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
104 REG32(ZDMA_CH_CTRL1, 0x114)
105 FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
106 FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
107 REG32(ZDMA_CH_FCI, 0x118)
108 FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
109 FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
110 FIELD(ZDMA_CH_FCI, EN, 0, 1)
111 REG32(ZDMA_CH_STATUS, 0x11c)
112 FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
113 REG32(ZDMA_CH_DATA_ATTR, 0x120)
114 FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
115 FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
116 FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
117 FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
118 FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
119 FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
120 FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
121 FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
122 REG32(ZDMA_CH_DSCR_ATTR, 0x124)
123 FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
124 FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
125 FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
126 REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
127 REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
128 FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
129 REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
130 FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
131 REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
132 FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
133 FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
134 FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
135 FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
136 REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
137 REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
138 FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
139 REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
140 FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
141 REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
142 FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
143 FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
144 FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
145 REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
146 REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
147 REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
148 REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
149 REG32(ZDMA_CH_SRC_START_LSB, 0x158)
150 REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
151 FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
152 REG32(ZDMA_CH_DST_START_LSB, 0x160)
153 REG32(ZDMA_CH_DST_START_MSB, 0x164)
154 FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
/*
 * NOTE(review): ZDMA_CH_RATE_CTRL and ZDMA_CH_RATE_CNTL below both
 * declare offset 0x18c; only ZDMA_CH_RATE_CNTL is referenced by
 * zdma_regs_info.  The first declaration appears redundant — confirm
 * before removing.
 */
REG32(ZDMA_CH_RATE_CTRL, 0x18c)
    FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
172 REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
173 FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
174 REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
175 FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
176 REG32(ZDMA_CH_DBG0, 0x198)
177 FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
178 REG32(ZDMA_CH_DBG1, 0x19c)
179 FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
180 REG32(ZDMA_CH_CTRL2, 0x200)
181 FIELD(ZDMA_CH_CTRL2, EN, 0, 1)
182
/* ZDMA_CH_CTRL0.POINT_TYPE values: simple (register) vs scatter-gather. */
enum {
    PT_REG = 0,
    PT_MEM = 1,
};

/* Source descriptor WORD3.CMD values: action taken after the descriptor. */
enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

/* ZDMA_CH_CTRL0.MODE values: read-write, write-only, read-only. */
enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

/* Descriptor WORD3.TYPE bit: linear (contiguous) vs linked-list chaining. */
enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

/* AXI burst types, from the DATA_ATTR AWBURST/ARBURST fields. */
enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR = 1,
};
208
/* Recompute the channel IRQ line: it is high while any unmasked ISR
 * bit is set. */
static void zdma_ch_imr_update_irq(XlnxZDMA *s)
{
    uint32_t unmasked = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];

    qemu_set_irq(s->irq_zdma_ch_imr, !!unmasked);
}
217
/* Post-write hook for ZDMA_CH_ISR: a W1C write may clear interrupt
 * conditions, so re-evaluate the IRQ line. */
static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    zdma_ch_imr_update_irq(XLNX_ZDMA(reg->opaque));
}
223
/* Pre-write hook for ZDMA_CH_IEN: writing 1s unmasks (clears) the
 * corresponding IMR bits.  The IEN register itself stores zero. */
static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t enables = val64;

    s->regs[R_ZDMA_CH_IMR] &= ~enables;
    zdma_ch_imr_update_irq(s);

    return 0;
}
233
/* Pre-write hook for ZDMA_CH_IDS: writing 1s masks (sets) the
 * corresponding IMR bits.  The IDS register itself stores zero. */
static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t disables = val64;

    s->regs[R_ZDMA_CH_IMR] |= disables;
    zdma_ch_imr_update_irq(s);

    return 0;
}
243
/* Move the channel state machine and mirror the state into the STATUS
 * register.  A latched error overrides the reported state with 3 (ERR). */
static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
{
    s->state = state;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE,
                     s->error ? 3 : state);
}
254
/* Account a completed source descriptor: bump the SRC interrupt-account
 * counter, raise SRC_DSCR_DONE, and flag an error if the counter wrapped. */
static void zdma_src_done(XlnxZDMA *s)
{
    unsigned int count;

    count = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT) + 1;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, count);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);

    /* The account field is 8 bits wide; a re-read mismatch means it
     * overflowed on the increment above. */
    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT) != count) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}
269
/* Account a completed destination descriptor: bump the DST
 * interrupt-account counter, raise DST_DSCR_DONE, and flag an error if
 * the counter wrapped. */
static void zdma_dst_done(XlnxZDMA *s)
{
    unsigned int count;

    count = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT) + 1;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, count);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);

    /* The account field is 8 bits wide; a re-read mismatch means it
     * overflowed on the increment above. */
    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT) != count) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}
284
zdma_get_regaddr64(XlnxZDMA * s,unsigned int basereg)285 static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
286 {
287 uint64_t addr;
288
289 addr = s->regs[basereg + 1];
290 addr <<= 32;
291 addr |= s->regs[basereg];
292
293 return addr;
294 }
295
/* Store a 64-bit address into an LSB/MSB pair of 32-bit registers. */
static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
{
    s->regs[basereg + 1] = addr >> 32;
    s->regs[basereg] = (uint32_t)addr;
}
301
/* Build a descriptor from four consecutive WORD0..WORD3 registers
 * starting at reg (used for Simple/register-pointed mode). */
static void zdma_load_descriptor_reg(XlnxZDMA *s, unsigned int reg,
                                     XlnxZDMADescr *descr)
{
    descr->attr = s->regs[reg + 3];
    descr->size = s->regs[reg + 2];
    descr->addr = zdma_get_regaddr64(s, reg);
}
309
zdma_load_descriptor(XlnxZDMA * s,uint64_t addr,XlnxZDMADescr * descr)310 static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr,
311 XlnxZDMADescr *descr)
312 {
313 /* ZDMA descriptors must be aligned to their own size. */
314 if (addr % sizeof(XlnxZDMADescr)) {
315 qemu_log_mask(LOG_GUEST_ERROR,
316 "zdma: unaligned descriptor at %" PRIx64,
317 addr);
318 memset(descr, 0x0, sizeof(XlnxZDMADescr));
319 s->error = true;
320 return false;
321 }
322
323 descr->addr = address_space_ldq_le(&s->dma_as, addr, s->attr, NULL);
324 descr->size = address_space_ldl_le(&s->dma_as, addr + 8, s->attr, NULL);
325 descr->attr = address_space_ldl_le(&s->dma_as, addr + 12, s->attr, NULL);
326 return true;
327 }
328
/* Load the next source descriptor into s->dsc_src, either from the
 * SRC_DSCR registers (simple mode) or from guest memory (scatter-gather). */
static void zdma_load_src_descriptor(XlnxZDMA *s)
{
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    uint64_t addr;

    if (ptype == PT_REG) {
        zdma_load_descriptor_reg(s, R_ZDMA_CH_SRC_DSCR_WORD0, &s->dsc_src);
        return;
    }

    addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
    if (!zdma_load_descriptor(s, addr, &s->dsc_src)) {
        /* Flag the failed source-descriptor fetch in the ISR. */
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
    }
}
345
/* Advance the current-descriptor pointer held in the register pair at
 * basereg.  Linear descriptors follow each other contiguously; linked
 * descriptors store the next pointer immediately after themselves. */
static void zdma_update_descr_addr(XlnxZDMA *s, bool type,
                                   unsigned int basereg)
{
    uint64_t next;

    next = zdma_get_regaddr64(s, basereg) + sizeof(s->dsc_dst);
    if (type != DTYPE_LINEAR) {
        /* Linked list: fetch the next pointer from guest memory. */
        next = address_space_ldq_le(&s->dma_as, next, s->attr, NULL);
    }

    zdma_put_regaddr64(s, basereg, next);
}
362
/* Load the next destination descriptor into s->dsc_dst and, in
 * scatter-gather mode, advance the current-descriptor pointer. */
static void zdma_load_dst_descriptor(XlnxZDMA *s)
{
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    uint64_t addr;
    bool type;

    if (ptype == PT_REG) {
        zdma_load_descriptor_reg(s, R_ZDMA_CH_DST_DSCR_WORD0, &s->dsc_dst);
        return;
    }

    addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);
    if (!zdma_load_descriptor(s, addr, &s->dsc_dst)) {
        /* Flag the failed destination-descriptor fetch in the ISR. */
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
    }

    /* Advance the descriptor pointer. */
    type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, TYPE);
    zdma_update_descr_addr(s, type, R_ZDMA_CH_DST_CUR_DSCR_LSB);
}
384
/*
 * Push len bytes from buf to the destination described by the buffered
 * destination descriptor, consuming (and, in scatter-gather mode,
 * reloading) destination descriptors as their SIZE is exhausted.
 */
static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               AWBURST);

    /* FIXED burst types are only supported in simple dma mode. */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        /* Current descriptor exhausted in scatter-gather mode: fetch
         * the next one and re-read its size. */
        if (dst_size == 0 && ptype == PT_MEM) {
            zdma_load_dst_descriptor(s);
            dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                                  SIZE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode. */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              INTR);

        /* Chunk size: bounded by remaining input, descriptor space and,
         * for FIXED bursts, a single bus beat. */
        dlen = len > dst_size ? dst_size : len;
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_write(&s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen);
        /* FIXED bursts keep writing the same address; INCR advances. */
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        /* Descriptor fully consumed: raise DST_DSCR_DONE if requested. */
        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor. */
        s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
                                         ZDMA_CH_DST_DSCR_WORD2,
                                         SIZE,
                                         dst_size);
    }
}
443
/*
 * Execute the buffered source descriptor: stream its payload through
 * the staging buffer s->buf into the destination, update the byte
 * accounting, and apply the descriptor's post-completion command
 * (STOP disables the channel, HALT pauses it, otherwise advance to
 * the next source descriptor).
 */
static void zdma_process_descr(XlnxZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to. */
        src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        /* The write pattern comes from the WR_ONLY_WORD registers. */
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    while (src_size) {
        /* Transfer in chunks bounded by the staging buffer. */
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        /* FIXED bursts re-read the same location: cap at one beat. */
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            /* Write-only: s->buf already holds one beat of pattern. */
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_read(&s->dma_as, src_addr, s->attr, s->buf, len);
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        /* Read-only mode discards the data instead of writing it out. */
        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;
    }

    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);

    if (src_intr) {
        zdma_src_done(s);
    }

    /* Simple mode runs one descriptor per enable; STOP ends the chain. */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
    }

    if (src_cmd == CMD_HALT) {
        /* HALT pauses the channel and suppresses DMA_DONE. */
        zdma_set_state(s, PAUSED);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, false);
        zdma_ch_imr_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}
535
/* Main engine loop: keep fetching and executing source descriptors
 * until the channel leaves the ENABLED state or an error is latched. */
static void zdma_run(XlnxZDMA *s)
{
    for (;;) {
        if (s->state != ENABLED || s->error) {
            break;
        }

        zdma_load_src_descriptor(s);
        if (s->error) {
            zdma_set_state(s, DISABLED);
            continue;
        }
        zdma_process_descr(s);
    }

    zdma_ch_imr_update_irq(s);
}
550
/* Reload both current-descriptor pointers from the START address
 * registers and prime the destination descriptor (the source side is
 * loaded on demand by zdma_run). */
static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
{
    zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB,
                       zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB));
    zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB,
                       zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB));
    zdma_load_dst_descriptor(s);
}
561
/*
 * Post-write handler shared by ZDMA_CH_CTRL0 and ZDMA_CH_CTRL2.
 * Resolves the channel state machine (enable, continue-from-pause,
 * restart from the START registers) and then kicks the engine.
 */
static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);

    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        /* Enabling the channel clears any latched error condition. */
        s->error = false;

        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                /* Resume from the START address registers. */
                zdma_update_descr_addr_from_start(s);
            } else {
                /* Resume from the descriptor after the current one. */
                bool src_type = FIELD_EX32(s->dsc_src.words[3],
                                           ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                       R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
            /* CONT is consumed by acting on it. */
            ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
            zdma_set_state(s, ENABLED);
        } else if (s->state == DISABLED) {
            /* Fresh start: load pointers from the START registers. */
            zdma_update_descr_addr_from_start(s);
            zdma_set_state(s, ENABLED);
        }
    } else {
        /* Leave Paused state? */
        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}
595
/*
 * Per-register access descriptions: reset values plus reserved (rsvd),
 * read-only (ro), write-one-to-clear (w1c) and clear-on-read (cor)
 * masks, and the pre/post write hooks defined above.
 */
static RegisterAccessInfo zdma_regs_info[] = {
    { .name = "ZDMA_ERR_CTRL", .addr = A_ZDMA_ERR_CTRL,
      .rsvd = 0xfffffffe,
    },{ .name = "ZDMA_CH_ISR", .addr = A_ZDMA_CH_ISR,
      .rsvd = 0xfffff000,
      .w1c = 0xfff,
      .post_write = zdma_ch_isr_postw,
    },{ .name = "ZDMA_CH_IMR", .addr = A_ZDMA_CH_IMR,
      .reset = 0xfff,
      .rsvd = 0xfffff000,
      .ro = 0xfff,
    },{ .name = "ZDMA_CH_IEN", .addr = A_ZDMA_CH_IEN,
      .rsvd = 0xfffff000,
      .pre_write = zdma_ch_ien_prew,
    },{ .name = "ZDMA_CH_IDS", .addr = A_ZDMA_CH_IDS,
      .rsvd = 0xfffff000,
      .pre_write = zdma_ch_ids_prew,
    },{ .name = "ZDMA_CH_CTRL0", .addr = A_ZDMA_CH_CTRL0,
      .reset = 0x80,
      .rsvd = 0xffffff01,
      .post_write = zdma_ch_ctrlx_postw,
    },{ .name = "ZDMA_CH_CTRL1", .addr = A_ZDMA_CH_CTRL1,
      .reset = 0x3ff,
      .rsvd = 0xfffffc00,
    },{ .name = "ZDMA_CH_FCI", .addr = A_ZDMA_CH_FCI,
      .rsvd = 0xffffffc0,
    },{ .name = "ZDMA_CH_STATUS", .addr = A_ZDMA_CH_STATUS,
      .rsvd = 0xfffffffc,
      .ro = 0x3,
    },{ .name = "ZDMA_CH_DATA_ATTR", .addr = A_ZDMA_CH_DATA_ATTR,
      .reset = 0x483d20f,
      .rsvd = 0xf0000000,
    },{ .name = "ZDMA_CH_DSCR_ATTR", .addr = A_ZDMA_CH_DSCR_ATTR,
      .rsvd = 0xfffffe00,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD0", .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD1", .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
      .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD2", .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
      .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD3", .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
      .rsvd = 0xffffffe0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD0", .addr = A_ZDMA_CH_DST_DSCR_WORD0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD1", .addr = A_ZDMA_CH_DST_DSCR_WORD1,
      .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD2", .addr = A_ZDMA_CH_DST_DSCR_WORD2,
      .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD3", .addr = A_ZDMA_CH_DST_DSCR_WORD3,
      .rsvd = 0xfffffffa,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD0", .addr = A_ZDMA_CH_WR_ONLY_WORD0,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD1", .addr = A_ZDMA_CH_WR_ONLY_WORD1,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD2", .addr = A_ZDMA_CH_WR_ONLY_WORD2,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD3", .addr = A_ZDMA_CH_WR_ONLY_WORD3,
    },{ .name = "ZDMA_CH_SRC_START_LSB", .addr = A_ZDMA_CH_SRC_START_LSB,
    },{ .name = "ZDMA_CH_SRC_START_MSB", .addr = A_ZDMA_CH_SRC_START_MSB,
      .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_START_LSB", .addr = A_ZDMA_CH_DST_START_LSB,
    },{ .name = "ZDMA_CH_DST_START_MSB", .addr = A_ZDMA_CH_DST_START_MSB,
      .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB", .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
      .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB", .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
      .rsvd = 0xfffe0000,
      .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB", .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
      .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB", .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
      .rsvd = 0xfffe0000,
      .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB", .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
      .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB", .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
      .rsvd = 0xfffe0000,
      .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB", .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
      .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB", .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
      .rsvd = 0xfffe0000,
      .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_TOTAL_BYTE", .addr = A_ZDMA_CH_TOTAL_BYTE,
      .w1c = 0xffffffff,
    },{ .name = "ZDMA_CH_RATE_CNTL", .addr = A_ZDMA_CH_RATE_CNTL,
      .rsvd = 0xfffff000,
    },{ .name = "ZDMA_CH_IRQ_SRC_ACCT", .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
      .rsvd = 0xffffff00,
      .ro = 0xff,
      .cor = 0xff,
    },{ .name = "ZDMA_CH_IRQ_DST_ACCT", .addr = A_ZDMA_CH_IRQ_DST_ACCT,
      .rsvd = 0xffffff00,
      .ro = 0xff,
      .cor = 0xff,
    },{ .name = "ZDMA_CH_DBG0", .addr = A_ZDMA_CH_DBG0,
      .rsvd = 0xfffffe00,
      .ro = 0x1ff,

      /*
       * There's SW out there that will check the debug regs for free space.
       * Claim that we always have 0x100 free.
       */
      .reset = 0x100
    },{ .name = "ZDMA_CH_DBG1", .addr = A_ZDMA_CH_DBG1,
      .rsvd = 0xfffffe00,
      .ro = 0x1ff,
    },{ .name = "ZDMA_CH_CTRL2", .addr = A_ZDMA_CH_CTRL2,
      .rsvd = 0xfffffffe,
      .post_write = zdma_ch_ctrlx_postw,
    }
};
703
/* Device reset: restore every register to its declared reset value,
 * then re-evaluate the IRQ line. */
static void zdma_reset(DeviceState *dev)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int rn;

    for (rn = 0; rn < ARRAY_SIZE(s->regs_info); rn++) {
        register_reset(&s->regs_info[rn]);
    }

    zdma_ch_imr_update_irq(s);
}
715
zdma_read(void * opaque,hwaddr addr,unsigned size)716 static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
717 {
718 XlnxZDMA *s = XLNX_ZDMA(opaque);
719 RegisterInfo *r = &s->regs_info[addr / 4];
720
721 if (!r->data) {
722 char *path = object_get_canonical_path(OBJECT(s));
723 qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
724 path,
725 addr);
726 g_free(path);
727 ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
728 zdma_ch_imr_update_irq(s);
729 return 0;
730 }
731 return register_read(r, ~0, NULL, false);
732 }
733
zdma_write(void * opaque,hwaddr addr,uint64_t value,unsigned size)734 static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
735 unsigned size)
736 {
737 XlnxZDMA *s = XLNX_ZDMA(opaque);
738 RegisterInfo *r = &s->regs_info[addr / 4];
739
740 if (!r->data) {
741 char *path = object_get_canonical_path(OBJECT(s));
742 qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
743 path,
744 addr, value);
745 g_free(path);
746 ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
747 zdma_ch_imr_update_irq(s);
748 return;
749 }
750 register_write(r, value, ~0, NULL, false);
751 }
752
/* MMIO access description: little-endian, 32-bit accesses only. */
static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
762
zdma_realize(DeviceState * dev,Error ** errp)763 static void zdma_realize(DeviceState *dev, Error **errp)
764 {
765 XlnxZDMA *s = XLNX_ZDMA(dev);
766 unsigned int i;
767
768 if (!s->dma_mr) {
769 error_setg(errp, TYPE_XLNX_ZDMA " 'dma' link not set");
770 return;
771 }
772 address_space_init(&s->dma_as, s->dma_mr, "zdma-dma");
773
774 for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
775 RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];
776
777 *r = (RegisterInfo) {
778 .data = (uint8_t *)&s->regs[
779 zdma_regs_info[i].addr / 4],
780 .data_size = sizeof(uint32_t),
781 .access = &zdma_regs_info[i],
782 .opaque = s,
783 };
784 }
785
786 s->attr = MEMTXATTRS_UNSPECIFIED;
787 }
788
/* Instance init: expose the register file as MMIO and export the
 * channel interrupt line. */
static void zdma_init(Object *obj)
{
    XlnxZDMA *s = XLNX_ZDMA(obj);
    SysBusDevice *busdev = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
    sysbus_init_mmio(busdev, &s->iomem);
    sysbus_init_irq(busdev, &s->irq_zdma_ch_imr);
}
799
/* Migration state: the raw register file, the engine state and the two
 * buffered (in-flight) descriptors. */
static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XLNX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
        VMSTATE_UINT32(state, XlnxZDMA),
        VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
        VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
        VMSTATE_END_OF_LIST(),
    }
};
812
static Property zdma_props[] = {
    /* Data bus width in bits (default 64); bus_width / 8 bounds each
     * bus beat in the transfer loops above. */
    DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
    /* Memory region the DMA masters; mandatory (checked at realize). */
    DEFINE_PROP_LINK("dma", XlnxZDMA, dma_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
819
/* Class init: hook up realize, reset, properties and migration. */
static void zdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = zdma_realize;
    dc->vmsd = &vmstate_zdma;
    device_class_set_props(dc, zdma_props);
    device_class_set_legacy_reset(dc, zdma_reset);
}
829
/* QOM type registration data for the ZDMA sysbus device. */
static const TypeInfo zdma_info = {
    .name = TYPE_XLNX_ZDMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZDMA),
    .class_init = zdma_class_init,
    .instance_init = zdma_init,
};
837
/* Register the ZDMA type with the QOM type system. */
static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}
842
843 type_init(zdma_register_types)
844