/*
 * Xilinx Platform CSU Stream DMA emulation
 *
 * This implementation is based on
 * https://github.com/Xilinx/qemu/blob/master/hw/dma/csu_stream_dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 or
 * (at your option) version 3 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "hw/ptimer.h"
#include "hw/stream.h"
#include "hw/register.h"
#include "hw/dma/xlnx_csu_dma.h"

/*
 * Ref: UG1087 (v1.7) February 8, 2019
 * https://www.xilinx.com/html_docs/registers/ug1087/ug1087-zynq-ultrascale-registers
 * CSUDMA Module section
 */
REG32(ADDR, 0x0)
    FIELD(ADDR, ADDR, 2, 30) /* wo */
REG32(SIZE, 0x4)
    FIELD(SIZE, SIZE, 2, 27)
    FIELD(SIZE, LAST_WORD, 0, 1) /* rw, only exists in SRC */
REG32(STATUS, 0x8)
    FIELD(STATUS, DONE_CNT, 13, 3) /* wtc */
    FIELD(STATUS, FIFO_LEVEL, 5, 8) /* ro */
    FIELD(STATUS, OUTSTANDING, 1, 4) /* ro */
    FIELD(STATUS, BUSY, 0, 1) /* ro */
REG32(CTRL, 0xc)
    FIELD(CTRL, FIFOTHRESH, 25, 7) /* rw, only exists in DST, reset 0x40 */
    FIELD(CTRL, APB_ERR_RESP, 24, 1) /* rw */
    FIELD(CTRL, ENDIANNESS, 23, 1) /* rw */
    FIELD(CTRL, AXI_BRST_TYPE, 22, 1) /* rw */
    FIELD(CTRL, TIMEOUT_VAL, 10, 12) /* rw, reset: 0xFFE */
    FIELD(CTRL, FIFO_THRESH, 2, 8) /* rw, reset: 0x80 */
    FIELD(CTRL, PAUSE_STRM, 1, 1) /* rw */
    FIELD(CTRL, PAUSE_MEM, 0, 1) /* rw */
REG32(CRC, 0x10)
REG32(INT_STATUS, 0x14)
    FIELD(INT_STATUS, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_STATUS, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_STATUS, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_STATUS, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_STATUS, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_STATUS, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_STATUS, DONE, 1, 1) /* wtc */
    FIELD(INT_STATUS, MEM_DONE, 0, 1) /* wtc */
REG32(INT_ENABLE, 0x18)
    FIELD(INT_ENABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_ENABLE, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_ENABLE, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_ENABLE, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_ENABLE, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_ENABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_ENABLE, DONE, 1, 1) /* wtc */
    FIELD(INT_ENABLE, MEM_DONE, 0, 1) /* wtc */
REG32(INT_DISABLE, 0x1c)
    FIELD(INT_DISABLE, FIFO_OVERFLOW, 7, 1) /* wtc */
    FIELD(INT_DISABLE, INVALID_APB, 6, 1) /* wtc */
    FIELD(INT_DISABLE, THRESH_HIT, 5, 1) /* wtc */
    FIELD(INT_DISABLE, TIMEOUT_MEM, 4, 1) /* wtc */
    FIELD(INT_DISABLE, TIMEOUT_STRM, 3, 1) /* wtc */
    FIELD(INT_DISABLE, AXI_BRESP_ERR, 2, 1) /* wtc, SRC: AXI_RDERR */
    FIELD(INT_DISABLE, DONE, 1, 1) /* wtc */
    FIELD(INT_DISABLE, MEM_DONE, 0, 1) /* wtc */
REG32(INT_MASK, 0x20)
    FIELD(INT_MASK, FIFO_OVERFLOW, 7, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, INVALID_APB, 6, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, THRESH_HIT, 5, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, TIMEOUT_MEM, 4, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, TIMEOUT_STRM, 3, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, AXI_BRESP_ERR, 2, 1) /* ro, reset: 0x1, SRC: AXI_RDERR */
    FIELD(INT_MASK, DONE, 1, 1) /* ro, reset: 0x1 */
    FIELD(INT_MASK, MEM_DONE, 0, 1) /* ro, reset: 0x1 */
REG32(CTRL2, 0x24)
    FIELD(CTRL2, ARCACHE, 24, 3) /* rw */
    FIELD(CTRL2, ROUTE_BIT, 23, 1) /* rw */
    FIELD(CTRL2, TIMEOUT_EN, 22, 1) /* rw */
    FIELD(CTRL2, TIMEOUT_PRE, 4, 12) /* rw, reset: 0xFFF */
    FIELD(CTRL2, MAX_OUTS_CMDS, 0, 4) /* rw, reset: 0x8 */
REG32(ADDR_MSB, 0x28)
    FIELD(ADDR_MSB, ADDR_MSB, 0, 17) /* wo */

#define R_CTRL_TIMEOUT_VAL_RESET    (0xFFE)
#define R_CTRL_FIFO_THRESH_RESET    (0x80)
#define R_CTRL_FIFOTHRESH_RESET     (0x40)

#define R_CTRL2_TIMEOUT_PRE_RESET   (0xFFF)
#define R_CTRL2_MAX_OUTS_CMDS_RESET (0x8)

#define XLNX_CSU_DMA_ERR_DEBUG      (0)
#define XLNX_CSU_DMA_INT_R_MASK     (0xff)

/* UG1807: Set the prescaler value for the timeout in clk (~2.5ns) cycles */
#define XLNX_CSU_DMA_TIMER_FREQ     (400 * 1000 * 1000)
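/*
 * Worked example (reset values; a sketch of the maths, not from the TRM):
 * the src timer below runs at XLNX_CSU_DMA_TIMER_FREQ / (TIMEOUT_PRE + 1),
 * i.e. 400 MHz / 0x1000 ~= 97.66 kHz, and is loaded with TIMEOUT_VAL = 0xFFE
 * ticks, so the stream backpressure timeout fires after roughly 42 ms.
 */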

static bool xlnx_csu_dma_is_paused(XlnxCSUDMA *s)
{
    bool paused;

    paused = !!(s->regs[R_CTRL] & R_CTRL_PAUSE_STRM_MASK);
    paused |= !!(s->regs[R_CTRL] & R_CTRL_PAUSE_MEM_MASK);

    return paused;
}

static bool xlnx_csu_dma_get_eop(XlnxCSUDMA *s)
{
    return s->r_size_last_word;
}

static bool xlnx_csu_dma_burst_is_fixed(XlnxCSUDMA *s)
{
    return !!(s->regs[R_CTRL] & R_CTRL_AXI_BRST_TYPE_MASK);
}

static bool xlnx_csu_dma_timeout_enabled(XlnxCSUDMA *s)
{
    return !!(s->regs[R_CTRL2] & R_CTRL2_TIMEOUT_EN_MASK);
}

static void xlnx_csu_dma_update_done_cnt(XlnxCSUDMA *s, int a)
{
    int cnt;

    /* Adjust DONE_CNT; 'a' may be negative */
    cnt = ARRAY_FIELD_EX32(s->regs, STATUS, DONE_CNT) + a;
    ARRAY_FIELD_DP32(s->regs, STATUS, DONE_CNT, cnt);
}

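/*
 * Common data massaging for both channels: the SRC channel accumulates the
 * 32-bit word sum into the CRC register, and either channel byte-swaps each
 * word in place when CTRL.ENDIANNESS is set.
 */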
static void xlnx_csu_dma_data_process(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t bswap;
    uint32_t i;

    bswap = s->regs[R_CTRL] & R_CTRL_ENDIANNESS_MASK;
    if (s->is_dst && !bswap) {
        /* Fast path: nothing to do when ENDIANNESS is cleared */
        return;
    }

    for (i = 0; i < len; i += 4) {
        uint8_t *b = &buf[i];
        union {
            uint8_t u8[4];
            uint32_t u32;
        } v = {
            .u8 = { b[0], b[1], b[2], b[3] }
        };

        if (!s->is_dst) {
            s->regs[R_CRC] += v.u32;
        }
        if (bswap) {
            /*
             * No point in using bswap32() here, since we need to write back
             * into a potentially unaligned pointer.
             */
            b[0] = v.u8[3];
            b[1] = v.u8[2];
            b[2] = v.u8[1];
            b[3] = v.u8[0];
        }
    }
}

static void xlnx_csu_dma_update_irq(XlnxCSUDMA *s)
{
    qemu_set_irq(s->irq, !!(s->regs[R_INT_STATUS] & ~s->regs[R_INT_MASK]));
}

/* len is in bytes */
static uint32_t xlnx_csu_dma_read(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
    MemTxResult result = MEMTX_OK;

    if (xlnx_csu_dma_burst_is_fixed(s)) {
        uint32_t i;

        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
            uint32_t mlen = MIN(len - i, s->width);

            result = address_space_rw(&s->dma_as, addr, s->attr,
                                      buf + i, mlen, false);
        }
    } else {
        result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, false);
    }

    if (result == MEMTX_OK) {
        xlnx_csu_dma_data_process(s, buf, len);
    } else {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " HWADDR_FMT_plx
                      " for mem read", __func__, addr);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
        xlnx_csu_dma_update_irq(s);
    }
    return len;
}

/* len is in bytes */
static uint32_t xlnx_csu_dma_write(XlnxCSUDMA *s, uint8_t *buf, uint32_t len)
{
    hwaddr addr = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];
    MemTxResult result = MEMTX_OK;

    xlnx_csu_dma_data_process(s, buf, len);
    if (xlnx_csu_dma_burst_is_fixed(s)) {
        uint32_t i;

        for (i = 0; i < len && (result == MEMTX_OK); i += s->width) {
            uint32_t mlen = MIN(len - i, s->width);

            result = address_space_rw(&s->dma_as, addr, s->attr,
                                      buf, mlen, true);
            buf += mlen;
        }
    } else {
        result = address_space_rw(&s->dma_as, addr, s->attr, buf, len, true);
    }

    if (result != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad address " HWADDR_FMT_plx
                      " for mem write", __func__, addr);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_AXI_BRESP_ERR_MASK;
        xlnx_csu_dma_update_irq(s);
    }
    return len;
}

static void xlnx_csu_dma_done(XlnxCSUDMA *s)
{
    s->regs[R_STATUS] &= ~R_STATUS_BUSY_MASK;
    s->regs[R_INT_STATUS] |= R_INT_STATUS_DONE_MASK;

    if (!s->is_dst) {
        s->regs[R_INT_STATUS] |= R_INT_STATUS_MEM_DONE_MASK;
    }

    xlnx_csu_dma_update_done_cnt(s, 1);
}

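/*
 * Consume 'len' bytes of the programmed transfer: decrement SIZE, advance
 * ADDR/ADDR_MSB (unless the AXI burst type is fixed) and flag completion
 * once SIZE drains to zero. Returns the number of bytes still outstanding.
 */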
static uint32_t xlnx_csu_dma_advance(XlnxCSUDMA *s, uint32_t len)
{
    uint32_t size = s->regs[R_SIZE];
    hwaddr dst = (hwaddr)s->regs[R_ADDR_MSB] << 32 | s->regs[R_ADDR];

    assert(len <= size);

    size -= len;
    s->regs[R_SIZE] = size;

    if (!xlnx_csu_dma_burst_is_fixed(s)) {
        dst += len;
        s->regs[R_ADDR] = (uint32_t) dst;
        s->regs[R_ADDR_MSB] = dst >> 32;
    }

    if (size == 0) {
        xlnx_csu_dma_done(s);
    }

    return size;
}

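/*
 * SRC channel pump: while bytes remain, the channel is not paused and the
 * stream sink can accept data, read up to 4 KiB from memory and push it
 * downstream. If the sink applies backpressure and timeouts are enabled,
 * arm the src timer so TIMEOUT_STRM can be raised.
 */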
static void xlnx_csu_dma_src_notify(void *opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);
    unsigned char buf[4 * 1024];
    size_t rlen = 0;

    ptimer_transaction_begin(s->src_timer);
    /* Stop the backpressure timer */
    ptimer_stop(s->src_timer);

    while (s->regs[R_SIZE] && !xlnx_csu_dma_is_paused(s) &&
           stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
        uint32_t plen = MIN(s->regs[R_SIZE], sizeof buf);
        bool eop = false;

        /* Did we fit it all? */
        if (s->regs[R_SIZE] == plen && xlnx_csu_dma_get_eop(s)) {
            eop = true;
        }

        /* DMA transfer */
        xlnx_csu_dma_read(s, buf, plen);
        rlen = stream_push(s->tx_dev, buf, plen, eop);
        xlnx_csu_dma_advance(s, rlen);
    }

    if (xlnx_csu_dma_timeout_enabled(s) && s->regs[R_SIZE] &&
        !stream_can_push(s->tx_dev, xlnx_csu_dma_src_notify, s)) {
        uint32_t timeout = ARRAY_FIELD_EX32(s->regs, CTRL, TIMEOUT_VAL);
        uint32_t div = ARRAY_FIELD_EX32(s->regs, CTRL2, TIMEOUT_PRE) + 1;
        uint32_t freq = XLNX_CSU_DMA_TIMER_FREQ;

        freq /= div;
        ptimer_set_freq(s->src_timer, freq);
        ptimer_set_count(s->src_timer, timeout);
        ptimer_run(s->src_timer, 1);
    }

    ptimer_transaction_commit(s->src_timer);
    xlnx_csu_dma_update_irq(s);
}

static uint64_t addr_pre_write(RegisterInfo *reg, uint64_t val)
{
    /* Address is word aligned */
    return val & R_ADDR_ADDR_MASK;
}

static uint64_t size_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
    uint64_t size = val & R_SIZE_SIZE_MASK;

    if (s->regs[R_SIZE] != 0) {
        if (size || s->is_dst) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Starting DMA while already running.\n",
                          __func__);
        }
    }

    if (!s->is_dst) {
        s->r_size_last_word = !!(val & R_SIZE_LAST_WORD_MASK);
    }

    /* Size is word aligned */
    return size;
}

static uint64_t size_post_read(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    return val | s->r_size_last_word;
}

static void size_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    s->regs[R_STATUS] |= R_STATUS_BUSY_MASK;

    /*
     * Note that if SIZE is programmed to 0, and the DMA is started,
     * the interrupts DONE and MEM_DONE will be asserted.
     */
    if (s->regs[R_SIZE] == 0) {
        xlnx_csu_dma_done(s);
        xlnx_csu_dma_update_irq(s);
        return;
    }

    /* Setting SIZE is considered the last step in transfer configuration */
    if (!s->is_dst) {
        xlnx_csu_dma_src_notify(s);
    } else {
        if (s->notify) {
            s->notify(s->notify_opaque);
        }
    }
}

static uint64_t status_pre_write(RegisterInfo *reg, uint64_t val)
{
    return val & (R_STATUS_DONE_CNT_MASK | R_STATUS_BUSY_MASK);
}

static void ctrl_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    if (!s->is_dst) {
        if (!xlnx_csu_dma_is_paused(s)) {
            xlnx_csu_dma_src_notify(s);
        }
    } else {
        if (!xlnx_csu_dma_is_paused(s) && s->notify) {
            s->notify(s->notify_opaque);
        }
    }
}

static uint64_t int_status_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    /* DONE_CNT decrements when the 'DONE' flag is cleared */
    if ((val & s->regs[R_INT_STATUS] & R_INT_STATUS_DONE_MASK)) {
        xlnx_csu_dma_update_done_cnt(s, -1);
    }

    return s->regs[R_INT_STATUS] & ~val;
}

static void int_status_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    xlnx_csu_dma_update_irq(s);
}

static uint64_t int_enable_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
    uint32_t v32 = val;

    /*
     * R_INT_ENABLE doesn't have its own state.
     * It is used to indirectly modify R_INT_MASK.
     *
     * 1: Enable this interrupt field (the mask bit will be cleared to 0)
     * 0: No effect
     */
    s->regs[R_INT_MASK] &= ~v32;
    return 0;
}

static void int_enable_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    xlnx_csu_dma_update_irq(s);
}

static uint64_t int_disable_pre_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);
    uint32_t v32 = val;

    /*
     * R_INT_DISABLE doesn't have its own state.
     * It is used to indirectly modify R_INT_MASK.
     *
     * 1: Disable this interrupt field (the mask bit will be set to 1)
     * 0: No effect
     */
    s->regs[R_INT_MASK] |= v32;
    return 0;
}

static void int_disable_post_write(RegisterInfo *reg, uint64_t val)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(reg->opaque);

    xlnx_csu_dma_update_irq(s);
}

static uint64_t addr_msb_pre_write(RegisterInfo *reg, uint64_t val)
{
    return val & R_ADDR_MSB_ADDR_MSB_MASK;
}

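/*
 * XlnxCSUDMAClass::read hook: program ADDR/ADDR_MSB directly and then write
 * SIZE through the register framework to kick off a SRC-channel transfer of
 * 'len' bytes. MEMTX_OK is returned only if SIZE drained to zero.
 */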
static MemTxResult xlnx_csu_dma_class_read(XlnxCSUDMA *s, hwaddr addr,
                                           uint32_t len)
{
    RegisterInfo *reg = &s->regs_info[R_SIZE];
    uint64_t we = MAKE_64BIT_MASK(0, 4 * 8);

    s->regs[R_ADDR] = addr;
    s->regs[R_ADDR_MSB] = (uint64_t)addr >> 32;

    register_write(reg, len, we, object_get_typename(OBJECT(s)), false);

    return (s->regs[R_SIZE] == 0) ? MEMTX_OK : MEMTX_ERROR;
}

static const RegisterAccessInfo *xlnx_csu_dma_regs_info[] = {
#define DMACH_REGINFO(NAME, snd)                                              \
    (const RegisterAccessInfo []) {                                           \
        {                                                                     \
            .name = #NAME "_ADDR",                                            \
            .addr = A_ADDR,                                                   \
            .pre_write = addr_pre_write                                       \
        }, {                                                                  \
            .name = #NAME "_SIZE",                                            \
            .addr = A_SIZE,                                                   \
            .pre_write = size_pre_write,                                      \
            .post_write = size_post_write,                                    \
            .post_read = size_post_read                                       \
        }, {                                                                  \
            .name = #NAME "_STATUS",                                          \
            .addr = A_STATUS,                                                 \
            .pre_write = status_pre_write,                                    \
            .w1c = R_STATUS_DONE_CNT_MASK,                                    \
            .ro = (R_STATUS_BUSY_MASK                                         \
                   | R_STATUS_FIFO_LEVEL_MASK                                 \
                   | R_STATUS_OUTSTANDING_MASK)                               \
        }, {                                                                  \
            .name = #NAME "_CTRL",                                            \
            .addr = A_CTRL,                                                   \
            .post_write = ctrl_post_write,                                    \
            .reset = ((R_CTRL_TIMEOUT_VAL_RESET << R_CTRL_TIMEOUT_VAL_SHIFT)  \
                      | (R_CTRL_FIFO_THRESH_RESET << R_CTRL_FIFO_THRESH_SHIFT)\
                      | (snd ? 0 : R_CTRL_FIFOTHRESH_RESET                    \
                         << R_CTRL_FIFOTHRESH_SHIFT))                         \
        }, {                                                                  \
            .name = #NAME "_CRC",                                             \
            .addr = A_CRC,                                                    \
        }, {                                                                  \
            .name = #NAME "_INT_STATUS",                                      \
            .addr = A_INT_STATUS,                                             \
            .pre_write = int_status_pre_write,                                \
            .post_write = int_status_post_write                               \
        }, {                                                                  \
            .name = #NAME "_INT_ENABLE",                                      \
            .addr = A_INT_ENABLE,                                             \
            .pre_write = int_enable_pre_write,                                \
            .post_write = int_enable_post_write                               \
        }, {                                                                  \
            .name = #NAME "_INT_DISABLE",                                     \
            .addr = A_INT_DISABLE,                                            \
            .pre_write = int_disable_pre_write,                               \
            .post_write = int_disable_post_write                              \
        }, {                                                                  \
            .name = #NAME "_INT_MASK",                                        \
            .addr = A_INT_MASK,                                               \
            .ro = ~0,                                                         \
            .reset = XLNX_CSU_DMA_INT_R_MASK                                  \
        }, {                                                                  \
            .name = #NAME "_CTRL2",                                           \
            .addr = A_CTRL2,                                                  \
            .reset = ((R_CTRL2_TIMEOUT_PRE_RESET                              \
                       << R_CTRL2_TIMEOUT_PRE_SHIFT)                          \
                      | (R_CTRL2_MAX_OUTS_CMDS_RESET                          \
                         << R_CTRL2_MAX_OUTS_CMDS_SHIFT))                     \
        }, {                                                                  \
            .name = #NAME "_ADDR_MSB",                                        \
            .addr = A_ADDR_MSB,                                               \
            .pre_write = addr_msb_pre_write                                   \
        }                                                                     \
    }

    DMACH_REGINFO(DMA_SRC, true),
    DMACH_REGINFO(DMA_DST, false)
};

static const MemoryRegionOps xlnx_csu_dma_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    }
};

static void xlnx_csu_dma_src_timeout_hit(void *opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(opaque);

    /* Ignore if the timeout is disabled */
    if (!xlnx_csu_dma_timeout_enabled(s)) {
        return;
    }

    s->regs[R_INT_STATUS] |= R_INT_STATUS_TIMEOUT_STRM_MASK;
    xlnx_csu_dma_update_irq(s);
}

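/*
 * StreamSink push handler for the DST channel: accept at most SIZE bytes,
 * truncated to a whole number of 32-bit words. Data arriving while the
 * channel is paused (or amounting to less than one word) is dropped and
 * reported as a FIFO overflow.
 */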
static size_t xlnx_csu_dma_stream_push(StreamSink *obj, uint8_t *buf,
                                       size_t len, bool eop)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);
    uint32_t size = s->regs[R_SIZE];
    uint32_t mlen = MIN(size, len) & (~3); /* Size is word aligned */

    /* Only called on the DST channel */
    assert(s->is_dst);

    if (size == 0 || len <= 0) {
        return 0;
    }

    if (len && (xlnx_csu_dma_is_paused(s) || mlen == 0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "csu-dma: DST channel dropping %zu bytes of data.\n",
                      len);
        s->regs[R_INT_STATUS] |= R_INT_STATUS_FIFO_OVERFLOW_MASK;
        return len;
    }

    if (xlnx_csu_dma_write(s, buf, mlen) != mlen) {
        return 0;
    }

    xlnx_csu_dma_advance(s, mlen);
    xlnx_csu_dma_update_irq(s);

    return mlen;
}

static bool xlnx_csu_dma_stream_can_push(StreamSink *obj,
                                         StreamCanPushNotifyFn notify,
                                         void *notify_opaque)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);

    if (s->regs[R_SIZE] != 0) {
        return true;
    } else {
        s->notify = notify;
        s->notify_opaque = notify_opaque;
        return false;
    }
}

static void xlnx_csu_dma_reset(DeviceState *dev)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        register_reset(&s->regs_info[i]);
    }
}

static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(dev);
    RegisterInfoArray *reg_array;

    if (!s->is_dst && !s->tx_dev) {
        error_setg(errp, "zynqmp.csu-dma: Stream not connected");
        return;
    }

    if (!s->dma_mr) {
        error_setg(errp, TYPE_XLNX_CSU_DMA " 'dma' link not set");
        return;
    }
    address_space_init(&s->dma_as, s->dma_mr, "csu-dma");

    reg_array =
        register_init_block32(dev, xlnx_csu_dma_regs_info[!!s->is_dst],
                              XLNX_CSU_DMA_R_MAX,
                              s->regs_info, s->regs,
                              &xlnx_csu_dma_ops,
                              XLNX_CSU_DMA_ERR_DEBUG,
                              XLNX_CSU_DMA_R_MAX * 4);
    memory_region_add_subregion(&s->iomem,
                                0x0,
                                &reg_array->mem);

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
    sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);

    s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
                               s, PTIMER_POLICY_LEGACY);

    s->attr = MEMTXATTRS_UNSPECIFIED;

    s->r_size_last_word = 0;
}

static const VMStateDescription vmstate_xlnx_csu_dma = {
    .name = TYPE_XLNX_CSU_DMA,
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_PTIMER(src_timer, XlnxCSUDMA),
        VMSTATE_UINT16(width, XlnxCSUDMA),
        VMSTATE_BOOL(is_dst, XlnxCSUDMA),
        VMSTATE_BOOL(r_size_last_word, XlnxCSUDMA),
        VMSTATE_UINT32_ARRAY(regs, XlnxCSUDMA, XLNX_CSU_DMA_R_MAX),
        VMSTATE_END_OF_LIST(),
    }
};

static Property xlnx_csu_dma_properties[] = {
    /*
     * Ref PG021, Stream Data Width:
     * Data width in bits of the AXI S2MM AXI4-Stream Data bus.
     * This value must be equal to or less than the Memory Map Data Width.
     * Valid values are 8, 16, 32, 64, 128, 512 and 1024.
     * "dma-width" is the byte value of the "Stream Data Width".
     */
    DEFINE_PROP_UINT16("dma-width", XlnxCSUDMA, width, 4),
    /*
     * The CSU DMA is a two-channel, simple DMA, allowing separate control of
     * the SRC (read) channel and DST (write) channel. "is-dst" is used to mark
     * which channel the device is connected to.
     */
    DEFINE_PROP_BOOL("is-dst", XlnxCSUDMA, is_dst, true),
    DEFINE_PROP_LINK("stream-connected-dma", XlnxCSUDMA, tx_dev,
                     TYPE_STREAM_SINK, StreamSink *),
    DEFINE_PROP_LINK("dma", XlnxCSUDMA, dma_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
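
/*
 * A board or SoC model would typically wire one channel up roughly like
 * this (an illustrative sketch only; 'dma', 'mr', 'sink', 'base_addr' and
 * 'irq' are placeholders, not taken from this file):
 *
 *   XlnxCSUDMA *dma = XLNX_CSU_DMA(qdev_new(TYPE_XLNX_CSU_DMA));
 *   qdev_prop_set_bit(DEVICE(dma), "is-dst", false);
 *   object_property_set_link(OBJECT(dma), "dma", OBJECT(mr), &error_abort);
 *   object_property_set_link(OBJECT(dma), "stream-connected-dma",
 *                            OBJECT(sink), &error_abort);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dma), &error_abort);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, base_addr);
 *   sysbus_connect_irq(SYS_BUS_DEVICE(dma), 0, irq);
 */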

static void xlnx_csu_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    StreamSinkClass *ssc = STREAM_SINK_CLASS(klass);
    XlnxCSUDMAClass *xcdc = XLNX_CSU_DMA_CLASS(klass);

    dc->reset = xlnx_csu_dma_reset;
    dc->realize = xlnx_csu_dma_realize;
    dc->vmsd = &vmstate_xlnx_csu_dma;
    device_class_set_props(dc, xlnx_csu_dma_properties);

    ssc->push = xlnx_csu_dma_stream_push;
    ssc->can_push = xlnx_csu_dma_stream_can_push;

    xcdc->read = xlnx_csu_dma_class_read;
}

static void xlnx_csu_dma_init(Object *obj)
{
    XlnxCSUDMA *s = XLNX_CSU_DMA(obj);

    memory_region_init(&s->iomem, obj, TYPE_XLNX_CSU_DMA,
                       XLNX_CSU_DMA_R_MAX * 4);
}

static const TypeInfo xlnx_csu_dma_info = {
    .name          = TYPE_XLNX_CSU_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxCSUDMA),
    .class_init    = xlnx_csu_dma_class_init,
    .class_size    = sizeof(XlnxCSUDMAClass),
    .instance_init = xlnx_csu_dma_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SINK },
        { }
    }
};

static void xlnx_csu_dma_register_types(void)
{
    type_register_static(&xlnx_csu_dma_info);
}

type_init(xlnx_csu_dma_register_types)