/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/ptimer.h"
#include "qemu/log.h"
#include "qapi/qmp/qerror.h"
#include "qemu/main-loop.h"

#include "hw/stream.h"

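/* Debug printouts: D(x) expands to nothing unless it is redefined as "x".  */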
#define D(x)

#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream"

#define XILINX_AXI_DMA(obj) \
     OBJECT_CHECK(XilinxAXIDMA, (obj), TYPE_XILINX_AXI_DMA)

#define XILINX_AXI_DMA_DATA_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
     TYPE_XILINX_AXI_DMA_DATA_STREAM)

#define XILINX_AXI_DMA_CONTROL_STREAM(obj) \
     OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj),\
     TYPE_XILINX_AXI_DMA_CONTROL_STREAM)

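/*
 * Per-channel register map, expressed as word indices into Stream::regs[].
 * Each of the two channels (MM2S at offset 0x00, S2MM at offset 0x30) has
 * its own copy of these registers.
 */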
#define R_DMACR             (0x00 / 4)
#define R_DMASR             (0x04 / 4)
#define R_CURDESC           (0x08 / 4)
#define R_TAILDESC          (0x10 / 4)
#define R_MAX               (0x30 / 4)

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

typedef struct XilinxAXIDMA XilinxAXIDMA;
typedef struct XilinxAXIDMAStreamSlave XilinxAXIDMAStreamSlave;

enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE  = 2,
    DMASR_IOC_IRQ  = 1 << 12,
    DMASR_DLY_IRQ  = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};

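/*
 * In-memory layout of a scatter-gather descriptor, as it is read from and
 * written back to guest memory (little-endian on the bus).
 */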
struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint8_t app[CONTROL_PAYLOAD_SIZE];
};

enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};

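/*
 * Per-channel state. streams[0] models the MM2S (TX) channel and
 * streams[1] the S2MM (RX) channel.
 */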
struct Stream {
    QEMUBH *bh;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
    uint8_t app[20];
};

struct XilinxAXIDMAStreamSlave {
    Object parent;

    struct XilinxAXIDMA *dma;
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    XilinxAXIDMAStreamSlave rx_data_dev;
    XilinxAXIDMAStreamSlave rx_control_dev;

    struct Stream streams[2];

    StreamCanPushNotifyFn notify;
    void *notify_opaque;
};

/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct Stream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct Stream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_idle(struct Stream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct Stream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* starts up halted.  */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold.  */
}

/* Map an offset addr into a channel index.  */
static inline int streamid_from_addr(hwaddr addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}

#ifdef DEBUG_ENET
static void stream_desc_show(struct SDesc *d)
{
    qemu_log("buffer_addr  = %" PRIx64 "\n", d->buffer_address);
    qemu_log("nxtdesc      = %" PRIx64 "\n", d->nxtdesc);
    qemu_log("control      = %x\n", d->control);
    qemu_log("status       = %x\n", d->status);
}
#endif

static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    cpu_physical_memory_read(addr, d, sizeof *d);

    /* Convert from LE into host endianness.  */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
}

static void stream_desc_store(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    /* Convert from host endianness into LE.  */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    cpu_physical_memory_write(addr, d, sizeof *d);
}

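/*
 * An interrupt is signalled when a pending bit in DMASR has its
 * corresponding enable bit set in DMACR (same bit positions).
 */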
static void stream_update_irq(struct Stream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

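/* Reload the IRQ coalescing counter from the threshold field in DMACR[23:16].  */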
static void stream_reload_complete_cnt(struct Stream *s)
{
    unsigned int comp_th;
    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}

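/* Delay timer expired: raise the delay IRQ and re-arm the coalescing counter.  */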
static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}

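/*
 * Called once per completed packet: (re)start the delay timer and raise
 * the IOC IRQ when the coalescing threshold has been reached.
 */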
static void stream_complete(struct Stream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer.  */
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq.  */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
}

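/*
 * MM2S path: walk the descriptor ring starting at CURDESC, gather the
 * buffers into txbuf and push the packet out on the data stream at EOF.
 * The app words of the SOF descriptor are pushed out on the control
 * stream.  Processing stops at an already completed descriptor or at
 * TAILDESC.
 */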
static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
                                 StreamSlave *tx_control_dev)
{
    uint32_t prev_d;
    unsigned char txbuf[16 * 1024];
    unsigned int txlen;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app));
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 txbuf + s->pos, txlen);
        s->pos += txlen;

        if (stream_desc_eof(&s->desc)) {
            stream_push(tx_data_dev, txbuf, s->pos);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor.  */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}

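/*
 * S2MM path: scatter an incoming stream packet into the descriptor ring,
 * marking SOF on the first descriptor and EOF on the last one.  Returns
 * the number of bytes consumed from buf.
 */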
static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
                                   size_t len)
{
    uint32_t prev_d;
    unsigned int rxlen;
    size_t pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return 0;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remaining data fits within this descriptor's buffer.  */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor.  */
        if (!len) {
            stream_complete(s);
            memcpy(s->desc.app, s->app, sizeof(s->desc.app));
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance.  */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }

    return pos;
}

static void xilinx_axidma_reset(DeviceState *dev)
{
    int i;
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
    }
}

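/*
 * Sink for the control stream feeding the S2MM side; the app words are
 * stored and later copied into the EOF descriptor of the next packet.
 */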
static size_t
xilinx_axidma_control_stream_push(StreamSlave *obj, unsigned char *buf,
                                  size_t len)
{
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
    struct Stream *s = &cs->dma->streams[1];

    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI DMA requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->app, buf, len);
    return len;
}

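/*
 * Backpressure hook: if the S2MM channel cannot accept data right now,
 * remember the notifier so it can be called from axidma_write() once the
 * guest re-arms the channel.
 */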
static bool
xilinx_axidma_data_stream_can_push(StreamSlave *obj,
                                   StreamCanPushNotifyFn notify,
                                   void *notify_opaque)
{
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
    struct Stream *s = &ds->dma->streams[1];

    if (!stream_running(s) || stream_idle(s)) {
        ds->dma->notify = notify;
        ds->dma->notify_opaque = notify_opaque;
        return false;
    }

    return true;
}

static size_t
xilinx_axidma_data_stream_push(StreamSlave *obj, unsigned char *buf, size_t len)
{
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
    struct Stream *s = &ds->dma->streams[1];
    size_t ret;

    ret = stream_process_s2mem(s, buf, len);
    stream_update_irq(s);
    return ret;
}

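/*
 * MMIO access: the offset selects the channel (0x00 MM2S, 0x30 S2MM) and
 * the register word within that channel.
 */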
static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Simulate one cycle's reset delay.  */
            s->regs[addr] &= ~DMACR_RESET;
            r = s->regs[addr];
            break;
        case R_DMASR:
            s->regs[addr] &= 0xffff;
            s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
            s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
            r = s->regs[addr];
            break;
        default:
            r = s->regs[addr];
            D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                           __func__, sid, addr * 4, r));
            break;
    }
    return r;
}

static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
        case R_DMACR:
            /* Tailptr mode is always on.  */
            value |= DMACR_TAILPTR_MODE;
            /* Remember our previous reset state.  */
            value |= (s->regs[addr] & DMACR_RESET);
            s->regs[addr] = value;

            if (value & DMACR_RESET) {
                stream_reset(s);
            }

            if ((value & 1) && !stream_resetting(s)) {
                /* Start processing.  */
                s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
            }
            stream_reload_complete_cnt(s);
            break;

        case R_DMASR:
            /* The IRQ bits are write-to-clear; mask them out of the value.  */
            value &= ~(value & DMASR_IRQ_MASK);
            s->regs[addr] = value;
            break;

        case R_TAILDESC:
            s->regs[addr] = value;
            s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle.  */
            if (!sid) {
                stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev);
            }
            break;
        default:
            D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                  __func__, sid, addr * 4, (unsigned)value));
            s->regs[addr] = value;
            break;
    }
    if (sid == 1 && d->notify) {
        StreamCanPushNotifyFn notifytmp = d->notify;
        d->notify = NULL;
        notifytmp(d->notify_opaque);
    }
    stream_update_irq(s);
}

static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

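/*
 * Wire up the reverse "dma" links on the rx stream child objects and set
 * up one ptimer per channel for the IRQ delay handling.
 */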
static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    Error *local_err = NULL;

    object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&ds->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &local_err);
    object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&cs->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &local_err);
    if (local_err) {
        goto xilinx_axidma_realize_fail;
    }
    object_property_set_link(OBJECT(ds), OBJECT(s), "dma", &local_err);
    object_property_set_link(OBJECT(cs), OBJECT(s), "dma", &local_err);
    if (local_err) {
        goto xilinx_axidma_realize_fail;
    }

    int i;

    for (i = 0; i < 2; i++) {
        s->streams[i].nr = i;
        s->streams[i].bh = qemu_bh_new(timer_hit, &s->streams[i]);
        s->streams[i].ptimer = ptimer_init(s->streams[i].bh);
        ptimer_set_freq(s->streams[i].ptimer, s->freqhz);
    }
    return;

xilinx_axidma_realize_fail:
    if (!*errp) {
        *errp = local_err;
    }
}

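/*
 * instance_init: publish the tx stream link properties, embed the rx
 * stream sinks as child objects, and export the two channel IRQs plus
 * the MMIO register region.
 */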
static void xilinx_axidma_init(Object *obj)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_property_add_link(obj, "axistream-connected", TYPE_STREAM_SLAVE,
                             (Object **)&s->tx_data_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    object_property_add_link(obj, "axistream-control-connected",
                             TYPE_STREAM_SLAVE,
                             (Object **)&s->tx_control_dev,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);

    object_initialize(&s->rx_data_dev, sizeof(s->rx_data_dev),
                      TYPE_XILINX_AXI_DMA_DATA_STREAM);
    object_initialize(&s->rx_control_dev, sizeof(s->rx_control_dev),
                      TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
    object_property_add_child(OBJECT(s), "axistream-connected-target",
                              (Object *)&s->rx_data_dev, &error_abort);
    object_property_add_child(OBJECT(s), "axistream-control-connected-target",
                              (Object *)&s->rx_control_dev, &error_abort);

    sysbus_init_irq(sbd, &s->streams[0].irq);
    sysbus_init_irq(sbd, &s->streams[1].irq);

    memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(sbd, &s->iomem);
}

static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_END_OF_LIST(),
};

static void axidma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_axidma_realize;
    dc->reset = xilinx_axidma_reset;
    dc->props = axidma_properties;
}

static StreamSlaveClass xilinx_axidma_data_stream_class = {
    .push = xilinx_axidma_data_stream_push,
    .can_push = xilinx_axidma_data_stream_can_push,
};

static StreamSlaveClass xilinx_axidma_control_stream_class = {
    .push = xilinx_axidma_control_stream_push,
};

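/*
 * Shared class_init for both stream sink types: copy the push/can_push
 * callbacks from the per-type class_data template above.
 */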
static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = ((StreamSlaveClass *)data)->push;
    ssc->can_push = ((StreamSlaveClass *)data)->can_push;
}

static const TypeInfo axidma_info = {
    .name          = TYPE_XILINX_AXI_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIDMA),
    .class_init    = axidma_class_init,
    .instance_init = xilinx_axidma_init,
};

static const TypeInfo xilinx_axidma_data_stream_info = {
    .name          = TYPE_XILINX_AXI_DMA_DATA_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init    = xilinx_axidma_stream_class_init,
    .class_data    = &xilinx_axidma_data_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static const TypeInfo xilinx_axidma_control_stream_info = {
    .name          = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init    = xilinx_axidma_stream_class_init,
    .class_data    = &xilinx_axidma_control_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
    type_register_static(&xilinx_axidma_data_stream_info);
    type_register_static(&xilinx_axidma_control_stream_info);
}

type_init(xilinx_axidma_register_types)